@@ -239,6 +239,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
{"fred", 0x00000007, 1, CPUID_REG_EAX, 17, 1},
{"lkgs", 0x00000007, 1, CPUID_REG_EAX, 18, 1},
{"wrmsrns", 0x00000007, 1, CPUID_REG_EAX, 19, 1},
+ {"avx-ifma", 0x00000007, 1, CPUID_REG_EAX, 23, 1},
{"cet-sss", 0x00000007, 1, CPUID_REG_EDX, 18, 1},
@@ -193,6 +193,8 @@ static const char *const str_7a1[32] =
/* 16 */ [17] = "fred",
[18] = "lkgs", [19] = "wrmsrns",
+
+ /* 22 */ [23] = "avx-ifma",
};
static const char *const str_e21a[32] =
@@ -1372,6 +1372,8 @@ static const struct vex {
{ { 0xad }, 2, T, R, pfx_66, Wn, LIG }, /* vfnmadd213s{s,d} */
{ { 0xae }, 2, T, R, pfx_66, Wn, Ln }, /* vfnmsub213p{s,d} */
{ { 0xaf }, 2, T, R, pfx_66, Wn, LIG }, /* vfnmsub213s{s,d} */
+ { { 0xb4 }, 2, T, R, pfx_66, W1, Ln }, /* vpmadd52luq */
+ { { 0xb5 }, 2, T, R, pfx_66, W1, Ln }, /* vpmadd52huq */
{ { 0xb6 }, 2, T, R, pfx_66, Wn, Ln }, /* vfmaddsub231p{s,d} */
{ { 0xb7 }, 2, T, R, pfx_66, Wn, Ln }, /* vfmsubadd231p{s,d} */
{ { 0xb8 }, 2, T, R, pfx_66, Wn, Ln }, /* vfmadd231p{s,d} */
@@ -186,6 +186,7 @@ void wrpkru(unsigned int val);
#define cpu_has_avx_vnni (cp.feat.avx_vnni && xcr0_mask(6))
#define cpu_has_avx512_bf16 (cp.feat.avx512_bf16 && xcr0_mask(0xe6))
#define cpu_has_cmpccxadd cp.feat.cmpccxadd
+#define cpu_has_avx_ifma (cp.feat.avx_ifma && xcr0_mask(6))
#define cpu_has_xgetbv1 (cpu_has_xsave && cp.xstate.xgetbv1)
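
[Editorial note: for context, using the architectural XCR0/XSTATE component numbers from the SDM (illustrative only, not Xen's own identifiers): mask 6 covers the SSE and YMM state components, so AVX-IFMA is, like AVX-VNNI, gated only on AVX state being enabled, while the 0xe6 mask above additionally demands the AVX-512 components:

#include <assert.h>

/* Architectural XCR0 / XSTATE component bits (SDM); illustrative only. */
#define XSTATE_SSE    (1u << 1)  /* XMM registers */
#define XSTATE_YMM    (1u << 2)  /* AVX: upper halves of YMM */
#define XSTATE_OPMASK (1u << 5)  /* AVX-512: k0-k7 */
#define XSTATE_ZMM    (1u << 6)  /* AVX-512: upper halves of ZMM0-15 */
#define XSTATE_HI_ZMM (1u << 7)  /* AVX-512: ZMM16-31 */

static_assert((XSTATE_SSE | XSTATE_YMM) == 6, "AVX-only mask");
static_assert((XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | XSTATE_ZMM |
               XSTATE_HI_ZMM) == 0xe6, "AVX-512 mask");
]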
@@ -171,6 +171,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
#define cpu_has_avx_vnni boot_cpu_has(X86_FEATURE_AVX_VNNI)
#define cpu_has_avx512_bf16 boot_cpu_has(X86_FEATURE_AVX512_BF16)
#define cpu_has_cmpccxadd boot_cpu_has(X86_FEATURE_CMPCCXADD)
+#define cpu_has_avx_ifma boot_cpu_has(X86_FEATURE_AVX_IFMA)
/* Synthesized. */
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
@@ -599,6 +599,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_cmpccxadd() (ctxt->cpuid->feat.cmpccxadd)
#define vcpu_has_lkgs() (ctxt->cpuid->feat.lkgs)
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
+#define vcpu_has_avx_ifma() (ctxt->cpuid->feat.avx_ifma)
#define vcpu_must_have(feat) \
generate_exception_if(!vcpu_has_##feat(), X86_EXC_UD)
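
[Editorial note: host_and_vcpu_must_have(), used in the emulation hunk below, pairs this vcpu check with a host cpu_has_* check, since the insns are carried out using the same insn class on the host. A paraphrased sketch only; the real macro differs between the hypervisor and test-harness builds:

#define host_and_vcpu_must_have(feat) ({                 \
    generate_exception_if(!cpu_has_##feat, X86_EXC_UD);  \
    vcpu_must_have(feat);                                \
})
]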
@@ -6727,6 +6727,12 @@ x86_emulate(
break;
}
+ case X86EMUL_OPC_VEX_66(0x0f38, 0xb4): /* vpmadd52luq [xy]mm/mem,[xy]mm,[xy]mm */
+ case X86EMUL_OPC_VEX_66(0x0f38, 0xb5): /* vpmadd52huq [xy]mm/mem,[xy]mm,[xy]mm */
+ host_and_vcpu_must_have(avx_ifma);
+ generate_exception_if(!vex.w, X86_EXC_UD);
+ goto simd_0f_ymm;
+
case X86EMUL_OPC_EVEX_66(0x0f38, 0xb4): /* vpmadd52luq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
case X86EMUL_OPC_EVEX_66(0x0f38, 0xb5): /* vpmadd52huq [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
host_and_vcpu_must_have(avx512_ifma);
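
[Editorial note: for reference, the per-lane operation being emulated: both insns multiply the low 52 bits of corresponding source qwords as unsigned integers and accumulate half of the 104-bit product into the destination. A minimal standalone model, assuming a compiler providing unsigned __int128; not Xen code:

#include <stdint.h>

/* One 64-bit lane of vpmadd52luq (high = 0) / vpmadd52huq (high = 1). */
static uint64_t pmadd52(uint64_t dst, uint64_t a, uint64_t b, int high)
{
    const uint64_t mask52 = (1ULL << 52) - 1;
    unsigned __int128 prod = (unsigned __int128)(a & mask52) * (b & mask52);

    return dst + (high ? (uint64_t)(prod >> 52) : ((uint64_t)prod & mask52));
}
]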
@@ -285,6 +285,7 @@ XEN_CPUFEATURE(FSRCS, 10*32+12) /
XEN_CPUFEATURE(FRED, 10*32+17) /* Flexible Return and Event Delivery */
XEN_CPUFEATURE(LKGS, 10*32+18) /*S Load Kernel GS Base */
XEN_CPUFEATURE(WRMSRNS, 10*32+19) /*A WRMSR Non-Serialising */
+XEN_CPUFEATURE(AVX_IFMA, 10*32+23) /*A AVX-IFMA Instructions */
/* AMD-defined CPU features, CPUID level 0x80000021.eax, word 11 */
XEN_CPUFEATURE(LFENCE_DISPATCH, 11*32+ 2) /*A LFENCE always serializing */
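
[Editorial note: as a cross-check of the numbering (hypothetical standalone snippet, X86_FEATURE_AVX_IFMA redefined here for illustration): featureset word 10 is CPUID.(EAX=7,ECX=1).EAX, per the str_7a1 table above, so 10*32+23 names the same bit as the libxl {leaf 7, subleaf 1, EAX, bit 23} entry:

#include <assert.h>

#define X86_FEATURE_AVX_IFMA (10*32 + 23)

static_assert(X86_FEATURE_AVX_IFMA / 32 == 10 &&  /* word 10 -> 7:1.EAX */
              X86_FEATURE_AVX_IFMA % 32 == 23,    /* bit 23 within it */
              "matches the libxl_cpuid entry");
]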
@@ -254,7 +254,7 @@ def crunch_numbers(state):
# feature flags. To use AVX512, AVX2 must be supported and
# enabled. Certain later extensions, acting on 256-bit vectors of
# integers, are better made dependent on AVX2 than on AVX.
- AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI],
+ AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI, AVX_IFMA],
# AVX512F is taken to mean hardware support for 512bit registers
# (which in practice depends on the EVEX prefix to encode) as well
As in a few cases before (in particular: AVX512_IFMA), since the insns here, and in particular their memory access patterns, follow the usual scheme, I didn't think it was necessary to add a contrived test specifically for them.

Signed-off-by: Jan Beulich <jbeulich@suse.com>