@@ -241,6 +241,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
{"wrmsrns", 0x00000007, 1, CPUID_REG_EAX, 19, 1},
{"avx-ifma", 0x00000007, 1, CPUID_REG_EAX, 23, 1},
+ {"avx-vnni-int8",0x00000007, 1, CPUID_REG_EDX, 4, 1},
{"cet-sss", 0x00000007, 1, CPUID_REG_EDX, 18, 1},
{"intel-psfd", 0x00000007, 2, CPUID_REG_EDX, 0, 1},
@@ -214,6 +214,8 @@ static const char *const str_7c1[32] =
static const char *const str_7d1[32] =
{
+ [ 4] = "avx-vnni-int8",
+
[18] = "cet-sss",
};
@@ -1337,8 +1337,14 @@ static const struct vex {
{ { 0x45 }, 2, T, R, pfx_66, Wn, Ln }, /* vpsrlv{d,q} */
{ { 0x46 }, 2, T, R, pfx_66, W0, Ln }, /* vpsravd */
{ { 0x47 }, 2, T, R, pfx_66, Wn, Ln }, /* vpsllv{d,q} */
+ { { 0x50 }, 2, T, R, pfx_no, W0, Ln }, /* vpdpbuud */
{ { 0x50 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpbusd */
+ { { 0x50 }, 2, T, R, pfx_f3, W0, Ln }, /* vpdpbsud */
+ { { 0x50 }, 2, T, R, pfx_f2, W0, Ln }, /* vpdpbssd */
+ { { 0x51 }, 2, T, R, pfx_no, W0, Ln }, /* vpdpbuuds */
{ { 0x51 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpbusds */
+ { { 0x51 }, 2, T, R, pfx_f3, W0, Ln }, /* vpdpbsuds */
+ { { 0x51 }, 2, T, R, pfx_f2, W0, Ln }, /* vpdpbssds */
{ { 0x52 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpwssd */
{ { 0x53 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpwssds */
{ { 0x58 }, 2, T, R, pfx_66, W0, Ln }, /* vpbroadcastd */
@@ -187,6 +187,7 @@ void wrpkru(unsigned int val);
#define cpu_has_avx512_bf16 (cp.feat.avx512_bf16 && xcr0_mask(0xe6))
#define cpu_has_cmpccxadd cp.feat.cmpccxadd
#define cpu_has_avx_ifma (cp.feat.avx_ifma && xcr0_mask(6))
+#define cpu_has_avx_vnni_int8 (cp.feat.avx_vnni_int8 && xcr0_mask(6))
#define cpu_has_xgetbv1 (cpu_has_xsave && cp.xstate.xgetbv1)
@@ -173,6 +173,9 @@ extern struct cpuinfo_x86 boot_cpu_data;
#define cpu_has_cmpccxadd boot_cpu_has(X86_FEATURE_CMPCCXADD)
#define cpu_has_avx_ifma boot_cpu_has(X86_FEATURE_AVX_IFMA)
+/* CPUID level 0x00000007:1.edx */
+#define cpu_has_avx_vnni_int8 boot_cpu_has(X86_FEATURE_AVX_VNNI_INT8)
+
/* Synthesized. */
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
@@ -600,6 +600,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_lkgs() (ctxt->cpuid->feat.lkgs)
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
#define vcpu_has_avx_ifma() (ctxt->cpuid->feat.avx_ifma)
+#define vcpu_has_avx_vnni_int8() (ctxt->cpuid->feat.avx_vnni_int8)
#define vcpu_must_have(feat) \
generate_exception_if(!vcpu_has_##feat(), X86_EXC_UD)
@@ -6077,13 +6077,23 @@ x86_emulate(
generate_exception_if(vex.l, EXC_UD);
goto simd_0f_avx;
+ case X86EMUL_OPC_VEX (0x0f38, 0x50): /* vpdpbuud [xy]mm/mem,[xy]mm,[xy]mm */
+ case X86EMUL_OPC_VEX_F3(0x0f38, 0x50): /* vpdpbsud [xy]mm/mem,[xy]mm,[xy]mm */
+ case X86EMUL_OPC_VEX_F2(0x0f38, 0x50): /* vpdpbssd [xy]mm/mem,[xy]mm,[xy]mm */
+ case X86EMUL_OPC_VEX (0x0f38, 0x51): /* vpdpbuuds [xy]mm/mem,[xy]mm,[xy]mm */
+ case X86EMUL_OPC_VEX_F3(0x0f38, 0x51): /* vpdpbsuds [xy]mm/mem,[xy]mm,[xy]mm */
+ case X86EMUL_OPC_VEX_F2(0x0f38, 0x51): /* vpdpbssds [xy]mm/mem,[xy]mm,[xy]mm */
+ host_and_vcpu_must_have(avx_vnni_int8);
+ generate_exception_if(vex.w, EXC_UD);
+ goto simd_0f_ymm;
+
case X86EMUL_OPC_VEX_66(0x0f38, 0x50): /* vpdpbusd [xy]mm/mem,[xy]mm,[xy]mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0x51): /* vpdpbusds [xy]mm/mem,[xy]mm,[xy]mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0x52): /* vpdpwssd [xy]mm/mem,[xy]mm,[xy]mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0x53): /* vpdpwssds [xy]mm/mem,[xy]mm,[xy]mm */
host_and_vcpu_must_have(avx_vnni);
generate_exception_if(vex.w, EXC_UD);
- goto simd_0f_avx;
+ goto simd_0f_ymm;
case X86EMUL_OPC_EVEX_66(0x0f38, 0x50): /* vpdpbusd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
case X86EMUL_OPC_EVEX_66(0x0f38, 0x51): /* vpdpbusds [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
@@ -305,6 +305,7 @@ XEN_CPUFEATURE(MCDT_NO, 13*32
/* Intel-defined CPU features, CPUID level 0x00000007:1.ecx, word 14 */
/* Intel-defined CPU features, CPUID level 0x00000007:1.edx, word 15 */
+XEN_CPUFEATURE(AVX_VNNI_INT8, 15*32+ 4) /*A AVX-VNNI-INT8 Instructions */
XEN_CPUFEATURE(CET_SSS, 15*32+18) /* CET Supervisor Shadow Stacks safe to use */
#endif /* XEN_CPUFEATURE */
@@ -254,7 +254,7 @@ def crunch_numbers(state):
# feature flags. If want to use AVX512, AVX2 must be supported and
# enabled. Certain later extensions, acting on 256-bit vectors of
# integers, better depend on AVX2 than AVX.
- AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI, AVX_IFMA],
+ AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI, AVX_IFMA, AVX_VNNI_INT8],
# AVX512F is taken to mean hardware support for 512bit registers
# (which in practice depends on the EVEX prefix to encode) as well
These are close relatives of the AVX-VNNI ISA extension. Since the insns here — and in particular their memory access patterns — follow the usual scheme (especially that of AVX-VNNI's byte variants), I didn't think it was necessary to add a contrived test specifically for them. While making the addition, also re-wire AVX-VNNI's handling to simd_0f_ymm: there's no reason to check the AVX feature alongside the one actually of interest (there are a few features where two checks are actually necessary, e.g. GFNI+AVX, but this isn't the case here). Signed-off-by: Jan Beulich <jbeulich@suse.com>