@@ -242,6 +242,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
{"avx-ifma", 0x00000007, 1, CPUID_REG_EAX, 23, 1},
{"avx-vnni-int8",0x00000007, 1, CPUID_REG_EDX, 4, 1},
+ {"avx-ne-convert",0x00000007, 1, CPUID_REG_EDX, 5, 1},
{"cet-sss", 0x00000007, 1, CPUID_REG_EDX, 18, 1},
{"intel-psfd", 0x00000007, 2, CPUID_REG_EDX, 0, 1},
@@ -214,7 +214,7 @@ static const char *const str_7c1[32] =
static const char *const str_7d1[32] =
{
- [ 4] = "avx-vnni-int8",
+ [ 4] = "avx-vnni-int8", [ 5] = "avx-ne-convert",
[18] = "cet-sss",
};
@@ -1350,6 +1350,7 @@ static const struct vex {
{ { 0x58 }, 2, T, R, pfx_66, W0, Ln }, /* vpbroadcastd */
{ { 0x59 }, 2, T, R, pfx_66, W0, Ln }, /* vpbroadcastq */
{ { 0x5a }, 2, F, R, pfx_66, W0, L1 }, /* vbroadcasti128 */
+ { { 0x72 }, 2, T, R, pfx_f3, W0, Ln }, /* vcvtneps2bf16 */
{ { 0x78 }, 2, T, R, pfx_66, W0, Ln }, /* vpbroadcastb */
{ { 0x79 }, 2, T, R, pfx_66, W0, Ln }, /* vpbroadcastw */
{ { 0x8c }, 2, F, R, pfx_66, Wn, Ln }, /* vpmaskmov{d,q} */
@@ -1378,6 +1379,12 @@ static const struct vex {
{ { 0xad }, 2, T, R, pfx_66, Wn, LIG }, /* vnmadd213s{s,d} */
{ { 0xae }, 2, T, R, pfx_66, Wn, Ln }, /* vnmsub213p{s,d} */
{ { 0xaf }, 2, T, R, pfx_66, Wn, LIG }, /* vnmsub213s{s,d} */
+ { { 0xb0 }, 2, F, R, pfx_no, W0, Ln }, /* vcvtneoph2ps */
+ { { 0xb0 }, 2, F, R, pfx_66, W0, Ln }, /* vcvtneeph2ps */
+ { { 0xb0 }, 2, F, R, pfx_f3, W0, Ln }, /* vcvtneebf162ps */
+ { { 0xb0 }, 2, F, R, pfx_f2, W0, Ln }, /* vcvtneobf162ps */
+ { { 0xb1 }, 2, F, R, pfx_66, W0, Ln }, /* vbcstnesh2ps */
+ { { 0xb1 }, 2, F, R, pfx_f3, W0, Ln }, /* vbcstnebf162ps */
{ { 0xb4 }, 2, T, R, pfx_66, W1, Ln }, /* vpmadd52luq */
{ { 0xb5 }, 2, T, R, pfx_66, W1, Ln }, /* vpmadd52huq */
{ { 0xb6 }, 2, T, R, pfx_66, Wn, Ln }, /* vmaddsub231p{s,d} */
@@ -4572,6 +4572,39 @@ int main(int argc, char **argv)
else
printf("skipped\n");
+ printf("%-40s", "Testing vbcstnebf162ps 2(%ecx),%ymm3...");
+ if ( stack_exec && cpu_has_avx_ne_convert )
+ {
+ decl_insn(vbcstnebf162ps);
+
+ asm volatile ( /* vbcstnebf162ps 2(%0), %%ymm3 */
+ put_insn(vbcstnebf162ps,
+ ".byte 0xc4, 0xe2, 0x7e, 0xb1, 0x59, 0x02 ")
+ :: "c" (NULL) );
+
+ res[0] = 0x43210000;
+ regs.ecx = (unsigned long)res;
+ set_insn(vbcstnebf162ps);
+ bytes_read = 0;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( rc != X86EMUL_OKAY || !check_eip(vbcstnebf162ps) ||
+ bytes_read != 2 )
+ goto fail;
+
+ asm volatile ( "vbroadcastss %1, %%ymm2;"
+ "vsubps %%ymm3, %%ymm2, %%ymm1;"
+ "vptest %%ymm1, %%ymm1;"
+ "setc %b0; setz %h0"
+ : "=&Q" (rc)
+ : "m" (res[0]) );
+ if ( (rc & 0xffff) != 0x0101 )
+ goto fail;
+
+ printf("okay\n");
+ }
+ else
+ printf("skipped\n");
+
printf("%-40s", "Testing stmxcsr (%edx)...");
if ( cpu_has_sse )
{
@@ -188,6 +188,7 @@ void wrpkru(unsigned int val);
#define cpu_has_cmpccxadd cp.feat.cmpccxadd
#define cpu_has_avx_ifma (cp.feat.avx_ifma && xcr0_mask(6))
#define cpu_has_avx_vnni_int8 (cp.feat.avx_vnni_int8 && xcr0_mask(6))
+#define cpu_has_avx_ne_convert (cp.feat.avx_ne_convert && xcr0_mask(6))
#define cpu_has_xgetbv1 (cpu_has_xsave && cp.xstate.xgetbv1)
@@ -175,6 +175,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
/* CPUID level 0x00000007:1.edx */
#define cpu_has_avx_vnni_int8 boot_cpu_has(X86_FEATURE_AVX_VNNI_INT8)
+#define cpu_has_avx_ne_convert boot_cpu_has(X86_FEATURE_AVX_NE_CONVERT)
/* Synthesized. */
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
@@ -423,6 +423,8 @@ static const struct ext0f38_table {
[0xad] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
[0xae] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
[0xaf] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
+ [0xb0] = { .simd_size = simd_other, .two_op = 1 },
+ [0xb1] = { .simd_size = simd_other, .two_op = 1 },
[0xb4 ... 0xb5] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
[0xb6 ... 0xb8] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
[0xb9] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
@@ -601,6 +601,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
#define vcpu_has_avx_ifma() (ctxt->cpuid->feat.avx_ifma)
#define vcpu_has_avx_vnni_int8() (ctxt->cpuid->feat.avx_vnni_int8)
+#define vcpu_has_avx_ne_convert() (ctxt->cpuid->feat.avx_ne_convert)
#define vcpu_must_have(feat) \
generate_exception_if(!vcpu_has_##feat(), X86_EXC_UD)
@@ -6208,6 +6208,19 @@ x86_emulate(
host_and_vcpu_must_have(avx512_vbmi2);
goto avx512f_no_sae;
+ case X86EMUL_OPC_VEX (0x0f38, 0xb0): /* vcvtneoph2ps mem,[xy]mm */
+ case X86EMUL_OPC_VEX_66(0x0f38, 0xb0): /* vcvtneeph2ps mem,[xy]mm */
+ case X86EMUL_OPC_VEX_F3(0x0f38, 0xb0): /* vcvtneebf162ps mem,[xy]mm */
+ case X86EMUL_OPC_VEX_F2(0x0f38, 0xb0): /* vcvtneobf162ps mem,[xy]mm */
+ generate_exception_if(ea.type != OP_MEM, EXC_UD);
+ /* fall through */
+ case X86EMUL_OPC_VEX_F3(0x0f38, 0x72): /* vcvtneps2bf16 [xy]mm/mem,xmm */
+ host_and_vcpu_must_have(avx_ne_convert);
+ generate_exception_if(vex.w, EXC_UD);
+ d |= TwoOp;
+ op_bytes = 16 << vex.l;
+ goto simd_0f_ymm;
+
case X86EMUL_OPC_EVEX_66(0x0f38, 0x75): /* vpermi2{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
case X86EMUL_OPC_EVEX_66(0x0f38, 0x7d): /* vpermt2{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
case X86EMUL_OPC_EVEX_66(0x0f38, 0x8d): /* vperm{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
@@ -6737,6 +6750,13 @@ x86_emulate(
break;
}
+ case X86EMUL_OPC_VEX_66(0x0f38, 0xb1): /* vbcstnesh2ps mem,[xy]mm */
+ case X86EMUL_OPC_VEX_F3(0x0f38, 0xb1): /* vbcstnebf162ps mem,[xy]mm */
+ host_and_vcpu_must_have(avx_ne_convert);
+ generate_exception_if(vex.w || ea.type != OP_MEM, EXC_UD);
+ op_bytes = 2;
+ goto simd_0f_ymm;
+
case X86EMUL_OPC_VEX_66(0x0f38, 0xb4): /* vpmadd52luq [xy]mm/mem,[xy]mm,[xy]mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0xb5): /* vpmadd52huq [xy]mm/mem,[xy]mm,[xy]mm */
host_and_vcpu_must_have(avx_ifma);
@@ -306,6 +306,7 @@ XEN_CPUFEATURE(MCDT_NO, 13*32
/* Intel-defined CPU features, CPUID level 0x00000007:1.edx, word 15 */
XEN_CPUFEATURE(AVX_VNNI_INT8, 15*32+ 4) /*A AVX-VNNI-INT8 Instructions */
+XEN_CPUFEATURE(AVX_NE_CONVERT, 15*32+ 5) /*A AVX-NE-CONVERT Instructions */
XEN_CPUFEATURE(CET_SSS, 15*32+18) /* CET Supervisor Shadow Stacks safe to use */
#endif /* XEN_CPUFEATURE */
@@ -232,7 +232,7 @@ def crunch_numbers(state):
# for the XOP prefix). VEX/XOP-encoded GPR instructions, such as
# those from the BMI{1,2}, TBM and LWP sets function fine in the
# absence of any enabled xstate.
- AVX: [FMA, FMA4, F16C, AVX2, XOP],
+ AVX: [FMA, FMA4, F16C, AVX2, XOP, AVX_NE_CONVERT],
# This dependency exists solely for the shadow pagetable code. If the
# host doesn't have NX support, the shadow pagetable code can't handle
Matching what was done earlier, explicit tests are added only for
irregular insn / memory access patterns.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
SDE: -grr or -srf