@@ -591,6 +591,10 @@ static const struct test avx512_vpopcntd
INSN(popcnt, 66, 0f38, 55, vl, dq, vl)
};
 
+static const struct test vpclmulqdq_all[] = {
+ INSN(pclmulqdq, 66, 0f3a, 44, vl, q_nb, vl)
+};
+
static const unsigned char vl_all[] = { VL_512, VL_128, VL_256 };
static const unsigned char vl_128[] = { VL_128 };
static const unsigned char vl_no128[] = { VL_512, VL_256 };
@@ -968,4 +972,10 @@ void evex_disp8_test(void *instr, struct
RUN(avx512_vbmi2, all);
RUN(avx512_vnni, all);
RUN(avx512_vpopcntdq, all);
+
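+ /* The EVEX forms need AVX512F on top of the VPCLMULQDQ check in RUN(). */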
+ if ( cpu_has_avx512f )
+ {
+ RUN(vpclmulqdq, all);
+ }
}
@@ -144,6 +144,8 @@ static inline bool xcr0_mask(uint64_t ma
#define cpu_has_avx512vl (cp.feat.avx512vl && xcr0_mask(0xe6))
#define cpu_has_avx512_vbmi (cp.feat.avx512_vbmi && xcr0_mask(0xe6))
#define cpu_has_avx512_vbmi2 (cp.feat.avx512_vbmi2 && xcr0_mask(0xe6))
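+/* Only XMM/YMM state is needed here; EVEX test forms are gated on AVX512F separately. */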
+#define cpu_has_vpclmulqdq (cp.feat.vpclmulqdq && xcr0_mask(6))
#define cpu_has_avx512_vnni (cp.feat.avx512_vnni && xcr0_mask(0xe6))
#define cpu_has_avx512_bitalg (cp.feat.avx512_bitalg && xcr0_mask(0xe6))
#define cpu_has_avx512_vpopcntdq (cp.feat.avx512_vpopcntdq && xcr0_mask(0xe6))
@@ -594,7 +594,7 @@ static const struct ext0f3a_table {
[0x3e ... 0x3f] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
[0x40 ... 0x41] = { .simd_size = simd_packed_fp },
[0x42 ... 0x43] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
- [0x44] = { .simd_size = simd_packed_int },
+ [0x44] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
[0x46] = { .simd_size = simd_packed_int },
[0x48 ... 0x49] = { .simd_size = simd_packed_fp, .four_op = 1 },
[0x4a ... 0x4b] = { .simd_size = simd_packed_fp, .four_op = 1 },
@@ -1890,6 +1890,7 @@ in_protmode(
#define vcpu_has_avx512vl() (ctxt->cpuid->feat.avx512vl)
#define vcpu_has_avx512_vbmi() (ctxt->cpuid->feat.avx512_vbmi)
#define vcpu_has_avx512_vbmi2() (ctxt->cpuid->feat.avx512_vbmi2)
+#define vcpu_has_vpclmulqdq() (ctxt->cpuid->feat.vpclmulqdq)
#define vcpu_has_avx512_vnni() (ctxt->cpuid->feat.avx512_vnni)
#define vcpu_has_avx512_bitalg() (ctxt->cpuid->feat.avx512_bitalg)
#define vcpu_has_avx512_vpopcntdq() (ctxt->cpuid->feat.avx512_vpopcntdq)
@@ -10207,13 +10208,21 @@ x86_emulate(
goto opmask_shift_imm;
case X86EMUL_OPC_66(0x0f3a, 0x44): /* pclmulqdq $imm8,xmm/m128,xmm */
- case X86EMUL_OPC_VEX_66(0x0f3a, 0x44): /* vpclmulqdq $imm8,xmm/m128,xmm,xmm */
+ case X86EMUL_OPC_VEX_66(0x0f3a, 0x44): /* vpclmulqdq $imm8,{x,y}mm/mem,{x,y}mm,{x,y}mm */
host_and_vcpu_must_have(pclmulqdq);
if ( vex.opcx == vex_none )
goto simd_0f3a_common;
- generate_exception_if(vex.l, EXC_UD);
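+ /* VEX.L=1 additionally requires the VPCLMULQDQ feature. */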
+ if ( vex.l )
+ host_and_vcpu_must_have(vpclmulqdq);
goto simd_0f_imm8_avx;
+ case X86EMUL_OPC_EVEX_66(0x0f3a, 0x44): /* vpclmulqdq $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm */
+ host_and_vcpu_must_have(vpclmulqdq);
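+ /* No broadcast, embedded rounding, or masking exists for these insns. */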
+ generate_exception_if(evex.brs || evex.opmsk, EXC_UD);
+ goto avx512f_imm8_no_sae;
+
case X86EMUL_OPC_VEX_66(0x0f3a, 0x4a): /* vblendvps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f3a, 0x4b): /* vblendvpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */
generate_exception_if(vex.w, EXC_UD);
@@ -111,6 +111,7 @@
/* CPUID level 0x00000007:0.ecx */
#define cpu_has_avx512_vbmi boot_cpu_has(X86_FEATURE_AVX512_VBMI)
#define cpu_has_avx512_vbmi2 boot_cpu_has(X86_FEATURE_AVX512_VBMI2)
+#define cpu_has_vpclmulqdq boot_cpu_has(X86_FEATURE_VPCLMULQDQ)
#define cpu_has_avx512_vnni boot_cpu_has(X86_FEATURE_AVX512_VNNI)
#define cpu_has_avx512_bitalg boot_cpu_has(X86_FEATURE_AVX512_BITALG)
#define cpu_has_avx512_vpopcntdq boot_cpu_has(X86_FEATURE_AVX512_VPOPCNTDQ)
@@ -121,7 +121,7 @@ XEN_CPUFEATURE(PBE, 0*32+31) /
 
/* Intel-defined CPU features, CPUID level 0x00000001.ecx, word 1 */
XEN_CPUFEATURE(SSE3, 1*32+ 0) /*A Streaming SIMD Extensions-3 */
-XEN_CPUFEATURE(PCLMULQDQ, 1*32+ 1) /*A Carry-less mulitplication */
+XEN_CPUFEATURE(PCLMULQDQ, 1*32+ 1) /*A Carry-less multiplication */
XEN_CPUFEATURE(DTES64, 1*32+ 2) /* 64-bit Debug Store */
XEN_CPUFEATURE(MONITOR, 1*32+ 3) /* Monitor/Mwait support */
XEN_CPUFEATURE(DSCPL, 1*32+ 4) /* CPL Qualified Debug Store */
@@ -229,6 +229,7 @@ XEN_CPUFEATURE(UMIP, 6*32+ 2) /
XEN_CPUFEATURE(PKU, 6*32+ 3) /*H Protection Keys for Userspace */
XEN_CPUFEATURE(OSPKE, 6*32+ 4) /*! OS Protection Keys Enable */
XEN_CPUFEATURE(AVX512_VBMI2, 6*32+ 6) /*A Additional AVX-512 Vector Byte Manipulation Instrs */
+XEN_CPUFEATURE(VPCLMULQDQ, 6*32+10) /*A Vector Carry-less Multiplication Instrs */
XEN_CPUFEATURE(AVX512_VNNI, 6*32+11) /*A Vector Neural Network Instrs */
XEN_CPUFEATURE(AVX512_BITALG, 6*32+12) /*A Support for VPOPCNT[B,W] and VPSHUFBITQMB */
XEN_CPUFEATURE(AVX512_VPOPCNTDQ, 6*32+14) /*A POPCNT for vectors of DW/QW */
@@ -254,8 +254,9 @@ def crunch_numbers(state):
 
# This is just the dependency between AVX512 and AVX2 of XSTATE
# feature flags. If want to use AVX512, AVX2 must be supported and
- # enabled.
- AVX2: [AVX512F],
+ # enabled. Certain later extensions, acting on 256-bit vectors of
+ # integers, are better made to depend on AVX2 than on AVX.
+ AVX2: [AVX512F, VPCLMULQDQ],
# AVX512F is taken to mean hardware support for 512bit registers
# (which in practice depends on the EVEX prefix to encode) as well
@@ -270,6 +271,11 @@ def crunch_numbers(state):
# registers), despite the SDM not formally making this connection.
AVX512BW: [AVX512_BF16, AVX512_BITALG, AVX512_VBMI, AVX512_VBMI2],
 
+ # Extensions with VEX/EVEX encodings keyed to a separate feature
+ # flag are made dependents of their respective legacy feature.
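+ # VPCLMULQDQ, for instance, extends PCLMULQDQ to wider vectors.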
+ PCLMULQDQ: [VPCLMULQDQ],
+
# The features:
# * Single Thread Indirect Branch Predictors
# * Speculative Store Bypass Disable