[2/4] x86emul: support SHA512

Message ID 3a77e584-b6e5-8f1a-d1c7-c4ca0de7b425@suse.com (mailing list archive)
State New, archived
Series x86emul: support further AVX extensions

Commit Message

Jan Beulich Aug. 7, 2023, 3:20 p.m. UTC
Since the insns here don't access memory, I didn't think it was
necessary to extend our SHA test for them.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
The need to set op_bytes here is a little odd; I'm inclined to move the
generate_exception_if(!op_bytes, X86_EXC_UD), currently at the top of SIMD
handling (near the bottom of x86_emulate()), into the "ea.type == OP_MEM"
conditional, as it's relevant (used) only there.
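
For illustration only (the surrounding SIMD dispatch code is elided here and
this isn't the literal source), the relocation would take roughly this shape:

    /* Hypothetical placement; "..." stands for the existing handling. */
    if ( ea.type == OP_MEM )
    {
        /* op_bytes only determines the size of an actual memory access. */
        generate_exception_if(!op_bytes, X86_EXC_UD);
        ...
    }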
---
SDE: -arl, -lnl, or -future
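
Purely for illustration (this is not part of the patch, nor how the existing
SHA test is structured), a register-only exercise of the three insns could
look like the below, assuming a toolchain new enough to provide the SHA512
intrinsics (built with -msha512) and using arbitrary input values:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void)
    {
        /* Arbitrary inputs; the insns themselves touch no memory. */
        __m256i state = _mm256_set1_epi64x(0x0123456789abcdefULL);
        __m256i msg   = _mm256_set1_epi64x(0xfedcba9876543210ULL);
        __m128i wk    = _mm_set1_epi64x(0x428a2f98d728ae22ULL);
        long long out[4];

        /* vsha512rnds2 xmm,ymm,ymm */
        state = _mm256_sha512rnds2_epi64(state, msg, wk);
        /* vsha512msg1 xmm,ymm */
        msg = _mm256_sha512msg1_epi64(msg, _mm256_castsi256_si128(state));
        /* vsha512msg2 ymm,ymm */
        msg = _mm256_sha512msg2_epi64(msg, state);

        /* The store is only needed to make the result observable. */
        _mm256_storeu_si256((__m256i *)out, msg);
        printf("%016llx %016llx %016llx %016llx\n",
               out[0], out[1], out[2], out[3]);
        return 0;
    }

Since all operands are registers, such a check wouldn't add anything to the
memory-access coverage the SHA test exists to provide.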

Patch

--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -185,6 +185,8 @@  static const char *const str_7d0[32] =
 
 static const char *const str_7a1[32] =
 {
+    [ 0] = "sha512",
+
     [ 4] = "avx-vnni",      [ 5] = "avx512-bf16",
 
     [10] = "fzrm",          [11] = "fsrs",
--- a/tools/tests/x86_emulator/predicates.c
+++ b/tools/tests/x86_emulator/predicates.c
@@ -1396,6 +1396,9 @@  static const struct vex {
     { { 0xbd }, 2, T, R, pfx_66, Wn, LIG }, /* vnmadd231s{s,d} */
     { { 0xbe }, 2, T, R, pfx_66, Wn, Ln }, /* vnmsub231p{s,d} */
     { { 0xbf }, 2, T, R, pfx_66, Wn, LIG }, /* vnmsub231s{s,d} */
+    { { 0xcb, 0xc0 }, 2, F, N, pfx_f2, W0, L1 }, /* vsha512rnds2 */
+    { { 0xcc, 0xc0 }, 2, F, N, pfx_f2, W0, L1 }, /* vsha512msg1 */
+    { { 0xcd, 0xc0 }, 2, F, N, pfx_f2, W0, L1 }, /* vsha512msg2 */
     { { 0xcf }, 2, T, R, pfx_66, W0, Ln }, /* vgf2p8mulb */
     { { 0xd2 }, 2, T, R, pfx_no, W0, Ln }, /* vpdpwuud */
     { { 0xd2 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpwusd */
--- a/tools/tests/x86_emulator/x86-emulate.h
+++ b/tools/tests/x86_emulator/x86-emulate.h
@@ -178,6 +178,7 @@  void wrpkru(unsigned int val);
 #define cpu_has_avx512_vp2intersect (cp.feat.avx512_vp2intersect && xcr0_mask(0xe6))
 #define cpu_has_serialize  cp.feat.serialize
 #define cpu_has_avx512_fp16 (cp.feat.avx512_fp16 && xcr0_mask(0xe6))
+#define cpu_has_sha512     (cp.feat.sha512 && xcr0_mask(6))
 #define cpu_has_avx_vnni   (cp.feat.avx_vnni && xcr0_mask(6))
 #define cpu_has_avx512_bf16 (cp.feat.avx512_bf16 && xcr0_mask(0xe6))
 #define cpu_has_avx_ifma   (cp.feat.avx_ifma && xcr0_mask(6))
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -176,6 +176,7 @@  static inline bool boot_cpu_has(unsigned
 #define cpu_has_arch_caps       boot_cpu_has(X86_FEATURE_ARCH_CAPS)
 
 /* CPUID level 0x00000007:1.eax */
+#define cpu_has_sha512          boot_cpu_has(X86_FEATURE_SHA512)
 #define cpu_has_avx_vnni        boot_cpu_has(X86_FEATURE_AVX_VNNI)
 #define cpu_has_avx512_bf16     boot_cpu_has(X86_FEATURE_AVX512_BF16)
 #define cpu_has_avx_ifma        boot_cpu_has(X86_FEATURE_AVX_IFMA)
--- a/xen/arch/x86/x86_emulate/decode.c
+++ b/xen/arch/x86/x86_emulate/decode.c
@@ -916,6 +916,14 @@  decode_0f38(struct x86_emulate_state *s,
     case X86EMUL_OPC_EVEX_66(0, 0x7c): /* vpbroadcast{d,q} */
         break;
 
+    case X86EMUL_OPC_VEX_F2(0, 0xcc): /* vsha512msg1 */
+    case X86EMUL_OPC_VEX_F2(0, 0xcd): /* vsha512msg2 */
+        s->desc |= TwoOp;
+        /* fallthrough */
+    case X86EMUL_OPC_VEX_F2(0, 0xcb): /* vsha512rnds2 */
+        s->simd_size = simd_other;
+        break;
+
     case 0xf0: /* movbe / crc32 */
         s->desc |= s->vex.pfx == vex_f2 ? ByteOp : Mov;
         if ( s->vex.pfx >= vex_f3 )
--- a/xen/arch/x86/x86_emulate/private.h
+++ b/xen/arch/x86/x86_emulate/private.h
@@ -587,6 +587,7 @@  amd_like(const struct x86_emulate_ctxt *
 #define vcpu_has_serialize()   (ctxt->cpuid->feat.serialize)
 #define vcpu_has_tsxldtrk()    (ctxt->cpuid->feat.tsxldtrk)
 #define vcpu_has_avx512_fp16() (ctxt->cpuid->feat.avx512_fp16)
+#define vcpu_has_sha512()      (ctxt->cpuid->feat.sha512)
 #define vcpu_has_avx_vnni()    (ctxt->cpuid->feat.avx_vnni)
 #define vcpu_has_avx512_bf16() (ctxt->cpuid->feat.avx512_bf16)
 #define vcpu_has_wrmsrns()     (ctxt->cpuid->feat.wrmsrns)
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -6856,6 +6856,14 @@  x86_emulate(
         host_and_vcpu_must_have(avx512er);
         goto simd_zmm_scalar_sae;
 
+    case X86EMUL_OPC_VEX_F2(0x0f38, 0xcb): /* vsha512rnds2 xmm,ymm,ymm */
+    case X86EMUL_OPC_VEX_F2(0x0f38, 0xcc): /* vsha512msg1 xmm,ymm */
+    case X86EMUL_OPC_VEX_F2(0x0f38, 0xcd): /* vsha512msg2 ymm,ymm */
+        host_and_vcpu_must_have(sha512);
+        generate_exception_if(ea.type != OP_REG || vex.w || !vex.l, X86_EXC_UD);
+        op_bytes = 32;
+        goto simd_0f_ymm;
+
     case X86EMUL_OPC_66(0x0f38, 0xcf):      /* gf2p8mulb xmm/m128,xmm */
         host_and_vcpu_must_have(gfni);
         goto simd_0f38_common;
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -276,6 +276,7 @@  XEN_CPUFEATURE(CORE_CAPS,     9*32+30) /
 XEN_CPUFEATURE(SSBD,          9*32+31) /*A  MSR_SPEC_CTRL.SSBD available */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1.eax, word 10 */
+XEN_CPUFEATURE(SHA512,       10*32+ 0) /*A  SHA512 Instructions */
 XEN_CPUFEATURE(AVX_VNNI,     10*32+ 4) /*A  AVX-VNNI Instructions */
 XEN_CPUFEATURE(AVX512_BF16,  10*32+ 5) /*A  AVX512 BFloat16 Instructions */
 XEN_CPUFEATURE(FZRM,         10*32+10) /*A  Fast Zero-length REP MOVSB */
--- a/xen/tools/gen-cpuid.py
+++ b/xen/tools/gen-cpuid.py
@@ -285,7 +285,7 @@  def crunch_numbers(state):
         # enabled.  Certain later extensions, acting on 256-bit vectors of
         # integers, better depend on AVX2 than AVX.
         AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI, AVX_IFMA, AVX_VNNI_INT8,
-               AVX_VNNI_INT16],
+               AVX_VNNI_INT16, SHA512],
 
         # AVX512F is taken to mean hardware support for 512bit registers
         # (which in practice depends on the EVEX prefix to encode) as well