@@ -185,7 +185,7 @@ static const char *const str_7d0[32] =
static const char *const str_7a1[32] =
{
- [ 0] = "sha512",
+ [ 0] = "sha512", [ 1] = "sm3",
[ 4] = "avx-vnni", [ 5] = "avx512-bf16",
@@ -1406,6 +1406,8 @@ static const struct vex {
{ { 0xd3 }, 2, T, R, pfx_no, W0, Ln }, /* vpdpwuuds */
{ { 0xd3 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpwusds */
{ { 0xd3 }, 2, T, R, pfx_f3, W0, Ln }, /* vpdpwsuds */
+ { { 0xda }, 2, T, R, pfx_no, W0, L0 }, /* vsm3msg1 */
+ { { 0xda }, 2, T, R, pfx_66, W0, L0 }, /* vsm3msg2 */
{ { 0xdb }, 2, T, R, pfx_66, WIG, L0 }, /* vaesimc */
{ { 0xdc }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenc */
{ { 0xdd }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenclast */
@@ -1490,6 +1492,7 @@ static const struct vex {
{ { 0x7f }, 3, T, R, pfx_66, Wn, LIG }, /* vfnmsubsd */
{ { 0xce }, 3, T, R, pfx_66, W1, Ln }, /* vgf2p8affineqb */
{ { 0xcf }, 3, T, R, pfx_66, W1, Ln }, /* vgf2p8affineinvqb */
+ { { 0xde }, 3, T, R, pfx_66, W0, L0 }, /* vsm3rnds2 */
{ { 0xdf }, 3, T, R, pfx_66, WIG, Ln }, /* vaeskeygenassist */
{ { 0xf0 }, 3, T, R, pfx_f2, Wn, L0 }, /* rorx */
};
@@ -179,6 +179,7 @@ void wrpkru(unsigned int val);
#define cpu_has_serialize cp.feat.serialize
#define cpu_has_avx512_fp16 (cp.feat.avx512_fp16 && xcr0_mask(0xe6))
#define cpu_has_sha512 (cp.feat.sha512 && xcr0_mask(6))
+#define cpu_has_sm3 (cp.feat.sm3 && xcr0_mask(6))
#define cpu_has_avx_vnni (cp.feat.avx_vnni && xcr0_mask(6))
#define cpu_has_avx512_bf16 (cp.feat.avx512_bf16 && xcr0_mask(0xe6))
#define cpu_has_avx_ifma (cp.feat.avx_ifma && xcr0_mask(6))
@@ -177,6 +177,7 @@ static inline bool boot_cpu_has(unsigned
/* CPUID level 0x00000007:1.eax */
#define cpu_has_sha512 boot_cpu_has(X86_FEATURE_SHA512)
+#define cpu_has_sm3 boot_cpu_has(X86_FEATURE_SM3)
#define cpu_has_avx_vnni boot_cpu_has(X86_FEATURE_AVX_VNNI)
#define cpu_has_avx512_bf16 boot_cpu_has(X86_FEATURE_AVX512_BF16)
#define cpu_has_avx_ifma boot_cpu_has(X86_FEATURE_AVX_IFMA)
@@ -439,6 +439,7 @@ static const struct ext0f38_table {
[0xd3] = { .simd_size = simd_other },
[0xd6] = { .simd_size = simd_other, .d8s = d8s_vl },
[0xd7] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
+ [0xda] = { .simd_size = simd_other },
[0xdb] = { .simd_size = simd_packed_int, .two_op = 1 },
[0xdc ... 0xdf] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
[0xf0] = { .two_op = 1 },
@@ -519,6 +520,7 @@ static const struct ext0f3a_table {
[0xc2] = { .simd_size = simd_any_fp, .d8s = d8s_vl },
[0xcc] = { .simd_size = simd_other },
[0xce ... 0xcf] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
+ [0xde] = { .simd_size = simd_other },
[0xdf] = { .simd_size = simd_packed_int, .two_op = 1 },
[0xf0] = {},
};
@@ -588,6 +588,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_tsxldtrk() (ctxt->cpuid->feat.tsxldtrk)
#define vcpu_has_avx512_fp16() (ctxt->cpuid->feat.avx512_fp16)
#define vcpu_has_sha512() (ctxt->cpuid->feat.sha512)
+#define vcpu_has_sm3() (ctxt->cpuid->feat.sm3)
#define vcpu_has_avx_vnni() (ctxt->cpuid->feat.avx_vnni)
#define vcpu_has_avx512_bf16() (ctxt->cpuid->feat.avx512_bf16)
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
@@ -6890,6 +6890,13 @@ x86_emulate(
op_bytes = 16 << vex.l;
goto simd_0f_ymm;
+ case X86EMUL_OPC_VEX (0x0f38, 0xda): /* vsm3msg1 xmm/mem,xmm,xmm */
+ case X86EMUL_OPC_VEX_66(0x0f38, 0xda): /* vsm3msg2 xmm/mem,xmm,xmm */
+ host_and_vcpu_must_have(sm3);
+ generate_exception_if(vex.w || vex.l, X86_EXC_UD);
+ op_bytes = 16;
+ goto simd_0f_ymm;
+
case X86EMUL_OPC_VEX_66(0x0f38, 0xdc): /* vaesenc {x,y}mm/mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0xdd): /* vaesenclast {x,y}mm/mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0xde): /* vaesdec {x,y}mm/mem,{x,y}mm,{x,y}mm */
@@ -7762,6 +7769,12 @@ x86_emulate(
fault_suppression = false;
goto avx512f_imm8_no_sae;
+ case X86EMUL_OPC_VEX_66(0x0f3a, 0xde): /* vsm3rnds2 $imm8,xmm/mem,xmm,xmm */
+ host_and_vcpu_must_have(sm3);
+ generate_exception_if(vex.w || vex.l, X86_EXC_UD);
+ op_bytes = 16;
+ goto simd_0f_imm8_ymm;
+
case X86EMUL_OPC_66(0x0f3a, 0xdf): /* aeskeygenassist $imm8,xmm/m128,xmm */
case X86EMUL_OPC_VEX_66(0x0f3a, 0xdf): /* vaeskeygenassist $imm8,xmm/m128,xmm */
host_and_vcpu_must_have(aesni);
@@ -277,6 +277,7 @@ XEN_CPUFEATURE(SSBD, 9*32+31) /
/* Intel-defined CPU features, CPUID level 0x00000007:1.eax, word 10 */
XEN_CPUFEATURE(SHA512, 10*32+ 0) /*A SHA512 Instructions */
+XEN_CPUFEATURE(SM3, 10*32+ 1) /*A SM3 Instructions */
XEN_CPUFEATURE(AVX_VNNI, 10*32+ 4) /*A AVX-VNNI Instructions */
XEN_CPUFEATURE(AVX512_BF16, 10*32+ 5) /*A AVX512 BFloat16 Instructions */
XEN_CPUFEATURE(FZRM, 10*32+10) /*A Fast Zero-length REP MOVSB */
@@ -262,7 +262,7 @@ def crunch_numbers(state):
# for the XOP prefix). VEX/XOP-encoded GPR instructions, such as
# those from the BMI{1,2}, TBM and LWP sets function fine in the
# absence of any enabled xstate.
- AVX: [FMA, FMA4, F16C, AVX2, XOP, AVX_NE_CONVERT],
+ AVX: [FMA, FMA4, F16C, AVX2, XOP, AVX_NE_CONVERT, SM3],
# This dependency exists solely for the shadow pagetable code. If the
# host doesn't have NX support, the shadow pagetable code can't handle
Since the insns here, and in particular their memory access patterns,
follow the usual scheme, I didn't think it necessary to add a contrived
test specifically for them.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
SDE: -arl, -lnl, or -future
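For reference, the gating expressed by cpu_has_sm3 above can be probed
from a plain user space program. A minimal sketch, assuming GCC/clang
with <cpuid.h> and <immintrin.h> (compile with -mxsave for _xgetbv());
cpu_supports_sm3() is a made-up helper, not anything from the harness:

#include <cpuid.h>
#include <immintrin.h>
#include <stdbool.h>
#include <stdio.h>

static bool cpu_supports_sm3(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* OSXSAVE (CPUID.1:ECX[27]) gates use of XGETBV. */
    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) || !(ecx & (1u << 27)) )
        return false;

    /* XCR0 must cover SSE (bit 1) and AVX (bit 2) state - xcr0_mask(6). */
    if ( (_xgetbv(0) & 6) != 6 )
        return false;

    /* SM3 is CPUID.(EAX=7,ECX=1):EAX bit 1, i.e. word 10 bit 1 above. */
    if ( !__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx) )
        return false;

    return eax & (1u << 1);
}

int main(void)
{
    printf("SM3 %ssupported\n", cpu_supports_sm3() ? "" : "not ");
    return 0;
}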
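And a hypothetical smoke test on top of that, merely executing each of
the three insns once on zeroed registers: it doesn't check results
against the spec, and it assumes an assembler new enough to know the
SM3 mnemonics (binutils 2.41 or newer, iirc). Absent suitable hardware
it can be run under SDE with one of the switches above:

#include <stdio.h>

int main(void)
{
    /* One XMM register's worth of output, suitably aligned. */
    unsigned int out[4] __attribute__((aligned(16))) = {};

    asm volatile ( "vpxor %%xmm0, %%xmm0, %%xmm0\n\t"
                   "vpxor %%xmm1, %%xmm1, %%xmm1\n\t"
                   "vpxor %%xmm2, %%xmm2, %%xmm2\n\t"
                   "vsm3msg1 %%xmm2, %%xmm1, %%xmm0\n\t"
                   "vsm3msg2 %%xmm2, %%xmm1, %%xmm0\n\t"
                   "vsm3rnds2 $0, %%xmm2, %%xmm1, %%xmm0\n\t"
                   "vmovdqa %%xmm0, %0"
                   : "=m" (out) : : "xmm0", "xmm1", "xmm2" );

    printf("%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3]);
    return 0;
}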