
[v3,07/16] x86emul: support AVX10.2 256-bit embedded rounding / SAE

Message ID 995a7961-2a28-426c-85b8-2ee3dd505f4b@suse.com
State New
Series x86: support AVX10

Commit Message

Jan Beulich Dec. 11, 2024, 10:14 a.m. UTC
AVX10.2 (along with APX) assigns new meaning to the bit that previously
distinguished EVEX from the Phi co-processor's MVEX. Therefore
evex_encoded() now needs to key off of something else: Use the opcode
mapping field for this, leveraging that map 0 has no assigned opcodes
(and appears unlikely to gain any).
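
As a minimal illustration of the new keying (a sketch only: the field
layout is abridged to what the patch touches, evex_demo and main() are
invented for this note, and the bit-field ordering assumes the usual
little-endian GCC allocation the emulator already relies on):

  #include <stdint.h>
  #include <stdio.h>

  /* Abridged model of the decoder's view of the EVEX payload bytes. */
  union evex_demo {
      uint8_t raw[3];
      struct {
          uint8_t opcx:3;  /* mmm: opcode map - map 0 has no insns */
          uint8_t mbz:1;   /* must be zero */
          uint8_t R:1, b:1, x:1, r:1;
          uint8_t pfx:2;   /* pp */
          uint8_t u:1;     /* U - previously "mbs", the EVEX/MVEX bit */
          uint8_t reg:4;   /* vvvv */
          uint8_t w:1;     /* W */
          /* byte 2 (opmsk, z, brs, lr, ...) omitted */
      };
  };

  /* A non-zero opcode map now identifies an EVEX encoding. */
  #define evex_encoded(e) ((e)->opcx)

  int main(void)
  {
      union evex_demo e = { .raw = { 0x05, 0x7c, 0 } };

      printf("map %u -> %sEVEX\n", e.opcx,
             evex_encoded(&e) ? "" : "not ");
      return 0;
  }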

Place the check of EVEX.U such that it covers most insns. EVEX.b is
being checked for individual insns as applicable - whenever embedded
rounding / SAE is valid for (register-only) 512-bit forms, it becomes
valid for 256-bit forms as well when AVX10.2 is permitted for a guest.
Scalar insns permitting embedded rounding / SAE, on the other hand,
have individual EVEX.U checks added (with, where applicable, minor
adjustments to the logic to avoid - where easily possible - testing the
same bit multiple times). See the sketch below for the resulting split.
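
A stand-alone rendering of the two resulting checks (a sketch only;
generic_ud(), scalar_er_sae_ud(), and their parameter names are
invented here, with "reg_only" standing in for ModRM.mod == 3 /
ea.type == OP_REG):

  #include <assert.h>
  #include <stdbool.h>

  /* decode.c: EVEX.U may be 0 only for register-only embedded
   * rounding / SAE, and only with AVX10.2 exposed to the guest.
   */
  static bool generic_ud(bool u, bool brs, bool reg_only, bool avx10_2)
  {
      return !u && !(reg_only && avx10_2 && brs);
  }

  /* x86_emulate.c, per scalar insn: with EVEX.b set, the operand must
   * be a register and EVEX.U must be 1.
   */
  static bool scalar_er_sae_ud(bool u, bool brs, bool reg_only)
  {
      return brs && (!reg_only || !u);
  }

  int main(void)
  {
      /* 256-bit embedded rounding passes generically only w/ AVX10.2. */
      assert(generic_ud(false, true, true, false));
      assert(!generic_ud(false, true, true, true));
      /* Scalar: EVEX.b with a memory operand or U=0 remains #UD. */
      assert(scalar_er_sae_ud(true, true, false));
      assert(!scalar_er_sae_ud(true, true, true));
      return 0;
  }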

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
To raise the question early: It is entirely unclear to me how we want to
allow control over the AVX10 minor version number from guest configs, as
that's not a boolean field and hence not suitable for simple bit-wise
masking of feature sets.
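
For contrast, a sketch of the mismatch (mask_feats() / clamp_avx10()
are illustrative only, not any actual tooling interface):

  #include <stdint.h>
  #include <stdio.h>

  /* Boolean features: policy intersection is a plain bit-wise AND. */
  static uint32_t mask_feats(uint32_t host, uint32_t cfg)
  {
      return host & cfg;
  }

  /* The AVX10 minor version is a multi-bit ordinal: "masking" would
   * have to become a comparison / min() instead.
   */
  static uint8_t clamp_avx10(uint8_t host_ver, uint8_t cfg_ver)
  {
      return cfg_ver < host_ver ? cfg_ver : host_ver;
  }

  int main(void)
  {
      printf("feats %#x, avx10 v%u\n",
             mask_feats(0xff, 0x0f), clamp_avx10(2, 1));
      return 0;
  }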
---
v3: Take care of scalar insns individually.
v2: New.

Patch

--- a/xen/arch/x86/x86_emulate/decode.c
+++ b/xen/arch/x86/x86_emulate/decode.c
@@ -16,7 +16,7 @@ 
 # define ERR_PTR(val) NULL
 #endif
 
-#define evex_encoded() (s->evex.mbs)
+#define evex_encoded() (s->evex.opcx)
 
 struct x86_emulate_state *
 x86_decode_insn(
@@ -1198,8 +1198,15 @@  int x86emul_decode(struct x86_emulate_st
                         s->evex.raw[1] = s->vex.raw[1];
                         s->evex.raw[2] = insn_fetch_type(uint8_t);
 
-                        generate_exception_if(!s->evex.mbs || s->evex.mbz, X86_EXC_UD);
-                        generate_exception_if(!s->evex.opmsk && s->evex.z, X86_EXC_UD);
+                        /*
+                         * .opcx is being checked here just to be on the safe
+                         * side, especially as long as evex_encoded() uses
+                         * this field.
+                         */
+                        generate_exception_if(s->evex.mbz || !s->evex.opcx,
+                                              X86_EXC_UD);
+                        generate_exception_if(!s->evex.opmsk && s->evex.z,
+                                              X86_EXC_UD);
 
                         if ( !mode_64bit() )
                             s->evex.R = 1;
@@ -1777,6 +1784,16 @@  int x86emul_decode(struct x86_emulate_st
     if ( override_seg != x86_seg_none )
         s->ea.mem.seg = override_seg;
 
+    /*
+     * While this generic check takes care of most insns, scalar ones (with
+     * EVEX.b set) need checking individually (elsewhere).
+     */
+    generate_exception_if((evex_encoded() &&
+                           !s->evex.u &&
+                           (s->modrm_mod != 3 ||
+                            !vcpu_has_avx10(2) || !s->evex.brs)),
+                          X86_EXC_UD);
+
     /* Fetch the immediate operand, if present. */
     switch ( d & SrcMask )
     {
--- a/xen/arch/x86/x86_emulate/private.h
+++ b/xen/arch/x86/x86_emulate/private.h
@@ -225,7 +225,7 @@  union evex {
         uint8_t x:1;     /* X */
         uint8_t r:1;     /* R */
         uint8_t pfx:2;   /* pp */
-        uint8_t mbs:1;
+        uint8_t u:1;     /* U */
         uint8_t reg:4;   /* vvvv */
         uint8_t w:1;     /* W */
         uint8_t opmsk:3; /* aaa */
@@ -594,6 +594,8 @@  amd_like(const struct x86_emulate_ctxt *
 #define vcpu_has_avx_vnni_int16() (ctxt->cpuid->feat.avx_vnni_int16)
 #define vcpu_has_user_msr()    (ctxt->cpuid->feat.user_msr)
 
+#define vcpu_has_avx10(minor)  (ctxt->cpuid->avx10.version >= (minor))
+
 #define vcpu_must_have(feat) \
     generate_exception_if(!vcpu_has_##feat(), X86_EXC_UD)
 
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1241,7 +1241,7 @@  int cf_check x86emul_unhandleable_rw(
 #define lock_prefix (state->lock_prefix)
 #define vex (state->vex)
 #define evex (state->evex)
-#define evex_encoded() (evex.mbs)
+#define evex_encoded() (evex.opcx)
 #define ea (state->ea)
 
 /* Undo DEBUG wrapper. */
@@ -3415,8 +3415,8 @@  x86_emulate(
     CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0x5f):    /* vmax{p,s}{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
     avx512f_all_fp:
         generate_exception_if((evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) ||
-                               (ea.type != OP_REG && evex.brs &&
-                                (evex.pfx & VEX_PREFIX_SCALAR_MASK))),
+                               ((evex.pfx & VEX_PREFIX_SCALAR_MASK) &&
+                                (ea.type != OP_REG ? evex.brs : !evex.u))),
                               X86_EXC_UD);
         visa_check(f);
         if ( ea.type != OP_REG || !evex.brs )
@@ -3622,11 +3622,12 @@  x86_emulate(
         /* fall through */
     CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x2a): /* vcvtsi2s{s,d} r/m,xmm,xmm */
     CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x7b): /* vcvtusi2s{s,d} r/m,xmm,xmm */
-        generate_exception_if(evex.opmsk || (ea.type != OP_REG && evex.brs),
-                              X86_EXC_UD);
+        generate_exception_if(evex.opmsk, X86_EXC_UD);
         visa_check(f);
         if ( !evex.brs )
             avx512_vlen_check(true);
+        else
+            generate_exception_if(ea.type != OP_REG || !evex.u, X86_EXC_UD);
         get_fpu(X86EMUL_FPU_zmm);
 
         if ( ea.type == OP_MEM )
@@ -3741,12 +3742,13 @@  x86_emulate(
     CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x78): /* vcvtts{s,d}2usi xmm/mem,reg */
     CASE_SIMD_SCALAR_FP(_EVEX, 0x0f, 0x79): /* vcvts{s,d}2usi xmm/mem,reg */
         generate_exception_if((evex.reg != 0xf || !evex.RX || !evex.R ||
-                               evex.opmsk ||
-                               (ea.type != OP_REG && evex.brs)),
+                               evex.opmsk),
                               X86_EXC_UD);
         visa_check(f);
         if ( !evex.brs )
             avx512_vlen_check(true);
+        else
+            generate_exception_if(ea.type != OP_REG || !evex.u, X86_EXC_UD);
         get_fpu(X86EMUL_FPU_zmm);
         opc = init_evex(stub);
         goto cvts_2si;
@@ -3816,12 +3818,13 @@  x86_emulate(
     CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2e): /* vucomis{s,d} xmm/mem,xmm */
     CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x2f): /* vcomis{s,d} xmm/mem,xmm */
         generate_exception_if((evex.reg != 0xf || !evex.RX || evex.opmsk ||
-                               (ea.type != OP_REG && evex.brs) ||
                                evex.w != evex.pfx),
                               X86_EXC_UD);
         visa_check(f);
         if ( !evex.brs )
             avx512_vlen_check(true);
+        else
+            generate_exception_if(ea.type != OP_REG || !evex.u, X86_EXC_UD);
         get_fpu(X86EMUL_FPU_zmm);
 
         opc = init_evex(stub);
@@ -5389,8 +5392,8 @@  x86_emulate(
 
     CASE_SIMD_ALL_FP(_EVEX, 0x0f, 0xc2): /* vcmp{p,s}{s,d} $imm8,[xyz]mm/mem,[xyz]mm,k{k} */
         generate_exception_if((evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK) ||
-                               (ea.type != OP_REG && evex.brs &&
-                                (evex.pfx & VEX_PREFIX_SCALAR_MASK)) ||
+                               ((evex.pfx & VEX_PREFIX_SCALAR_MASK) &&
+                                (ea.type != OP_REG ? evex.brs : !evex.u)) ||
                                !evex.r || !evex.R || evex.z),
                               X86_EXC_UD);
         visa_check(f);
@@ -6088,9 +6091,10 @@  x86_emulate(
     case X86EMUL_OPC_EVEX_66(0x0f38, 0xbd): /* vfnmadd231s{s,d} xmm/mem,xmm,xmm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f38, 0xbf): /* vfnmsub231s{s,d} xmm/mem,xmm,xmm{k} */
         visa_check(f);
-        generate_exception_if(ea.type != OP_REG && evex.brs, X86_EXC_UD);
         if ( !evex.brs )
             avx512_vlen_check(true);
+        else
+            generate_exception_if(ea.type != OP_REG || !evex.u, X86_EXC_UD);
         goto simd_zmm;
 
     case X86EMUL_OPC_66(0x0f38, 0x37): /* pcmpgtq xmm/m128,xmm */
@@ -7262,7 +7266,8 @@  x86_emulate(
 
     case X86EMUL_OPC_EVEX_66(0x0f3a, 0x0a): /* vrndscaless $imm8,xmm/mem,xmm,xmm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f3a, 0x0b): /* vrndscalesd $imm8,xmm/mem,xmm,xmm{k} */
-        generate_exception_if(ea.type != OP_REG && evex.brs, X86_EXC_UD);
+        generate_exception_if(ea.type != OP_REG ? evex.brs : !evex.u,
+                              X86_EXC_UD);
         /* fall through */
     case X86EMUL_OPC_EVEX_66(0x0f3a, 0x08): /* vrndscaleps $imm8,[xyz]mm/mem,[xyz]mm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f3a, 0x09): /* vrndscalepd $imm8,[xyz]mm/mem,[xyz]mm{k} */
@@ -7272,7 +7277,8 @@  x86_emulate(
         goto simd_imm8_zmm;
 
     case X86EMUL_OPC_EVEX(0x0f3a, 0x0a): /* vrndscalesh $imm8,xmm/mem,xmm,xmm{k} */
-        generate_exception_if(ea.type != OP_REG && evex.brs, X86_EXC_UD);
+        generate_exception_if(ea.type != OP_REG ? evex.brs : !evex.u,
+                              X86_EXC_UD);
         /* fall through */
     case X86EMUL_OPC_EVEX(0x0f3a, 0x08): /* vrndscaleph $imm8,[xyz]mm/mem,[xyz]mm{k} */
         visa_check(_fp16);
@@ -7605,9 +7611,10 @@  x86_emulate(
     case X86EMUL_OPC_EVEX_66(0x0f3a, 0x27): /* vgetmants{s,d} $imm8,xmm/mem,xmm,xmm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f3a, 0x55): /* vfixupimms{s,d} $imm8,xmm/mem,xmm,xmm{k} */
         visa_check(f);
-        generate_exception_if(ea.type != OP_REG && evex.brs, X86_EXC_UD);
         if ( !evex.brs )
             avx512_vlen_check(true);
+        else
+            generate_exception_if(ea.type != OP_REG || !evex.u, X86_EXC_UD);
         goto simd_imm8_zmm;
 
     case X86EMUL_OPC_EVEX(0x0f3a, 0x27): /* vgetmantsh $imm8,xmm/mem,xmm,xmm{k} */
@@ -7617,7 +7624,7 @@  x86_emulate(
         if ( !evex.brs )
             avx512_vlen_check(true);
         else
-            generate_exception_if(ea.type != OP_REG, X86_EXC_UD);
+            generate_exception_if(ea.type != OP_REG || !evex.u, X86_EXC_UD);
         goto simd_imm8_zmm;
 
     case X86EMUL_OPC_VEX_66(0x0f3a, 0x30): /* kshiftr{b,w} $imm8,k,k */
@@ -7805,7 +7812,7 @@  x86_emulate(
         goto avx512f_imm8_no_sae;
 
     case X86EMUL_OPC_EVEX_F3(0x0f3a, 0xc2): /* vcmpsh $imm8,xmm/mem,xmm,k{k} */
-        generate_exception_if(ea.type != OP_REG && evex.brs, X86_EXC_UD);
+        generate_exception_if(ea.type != OP_REG ? evex.brs : !evex.u, X86_EXC_UD);
         /* fall through */
     case X86EMUL_OPC_EVEX(0x0f3a, 0xc2): /* vcmpph $imm8,[xyz]mm/mem,[xyz]mm,k{k} */
         visa_check(_fp16);
@@ -7982,10 +7989,11 @@  x86_emulate(
     case X86EMUL_OPC_EVEX_66(6, 0xbd): /* vfnmadd231sh xmm/m16,xmm,xmm{k} */
     case X86EMUL_OPC_EVEX_66(6, 0xbf): /* vfnmsub231sh xmm/m16,xmm,xmm{k} */
         visa_check(_fp16);
-        generate_exception_if(evex.w || (ea.type != OP_REG && evex.brs),
-                              X86_EXC_UD);
+        generate_exception_if(evex.w, X86_EXC_UD);
         if ( !evex.brs )
             avx512_vlen_check(true);
+        else
+            generate_exception_if(ea.type != OP_REG || !evex.u, X86_EXC_UD);
         goto simd_zmm;
 
     case X86EMUL_OPC_EVEX_66(6, 0x4c): /* vrcpph [xyz]mm/mem,[xyz]mm{k} */
@@ -8015,7 +8023,9 @@  x86_emulate(
         unsigned int src1 = ~evex.reg;
 
         visa_check(_fp16);
-        generate_exception_if(evex.w || ((b & 1) && ea.type != OP_REG && evex.brs),
+        generate_exception_if((evex.w ||
+                               ((b & 1) &&
+                                (ea.type != OP_REG ? evex.brs : !evex.u))),
                               X86_EXC_UD);
         if ( mode_64bit() )
             src1 = (src1 & 0xf) | (!evex.RX << 4);