@@ -814,6 +814,13 @@ static const struct test avx10_2_128[] =
     INSN(movw, f3, map5, 7e, el, fp16, el),
 };
 
+static const struct test movrs_all[] = {
+    INSN(movrsb, f2, map5, 6f, vl, b,    vl),
+    INSN(movrsd, f3, map5, 6f, vl, d_nb, vl),
+    INSN(movrsq, f3, map5, 6f, vl, q_nb, vl),
+    INSN(movrsw, f2, map5, 6f, vl, w,    vl),
+};
+
 static const unsigned char vl_all[] = { VL_512, VL_128, VL_256 };
 static const unsigned char vl_128[] = { VL_128 };
 static const unsigned char vl_no128[] = { VL_512, VL_256 };
@@ -1236,6 +1243,8 @@ void evex_disp8_test(void *instr, struct
     run(cpu_has_avx10_2, avx10_2, all);
     run(cpu_has_avx10_2, avx10_2, 128);
+    if ( cpu_has_avx10_2 )
+        run(ctxt->addr_size == 64 && cpu_has_movrs, movrs, all);
 #undef run
 }
@@ -2195,6 +2195,8 @@ static const struct evex {
     { { 0x6d }, 2, T, R, pfx_f2, Wn, LIG }, /* vcvttsd2sis */
     { { 0x6e }, 2, T, R, pfx_66, WIG, L0 }, /* vmovw */
     { { 0x6e }, 2, T, R, pfx_f3, W0, L0 }, /* vmovw */
+    { { 0x6f }, 2, T, R, pfx_f3, Wn, Ln }, /* vmovrs{d,q} */
+    { { 0x6f }, 2, T, R, pfx_f2, Wn, Ln }, /* vmovrs{b,w} */
     { { 0x74 }, 2, T, R, pfx_no, W0, Ln }, /* vcvtbiasph2bf8s */
     { { 0x74 }, 2, T, R, pfx_f3, W0, Ln }, /* vcvtneph2bf8s */
     { { 0x74 }, 2, T, R, pfx_f2, W0, Ln }, /* vcvtne2ph2bf8s */
@@ -201,6 +201,7 @@ void wrpkru(unsigned int val);
                                xcr0_mask(0xe6))
 #define cpu_has_cmpccxadd  cpu_policy.feat.cmpccxadd
 #define cpu_has_avx_ifma  (cpu_policy.feat.avx_ifma && xcr0_mask(6))
+#define cpu_has_movrs      cpu_policy.feat.movrs
 #define cpu_has_avx_vnni_int8 (cpu_policy.feat.avx_vnni_int8 && \
                                xcr0_mask(6))
 #define cpu_has_avx_ne_convert (cpu_policy.feat.avx_ne_convert && \
@@ -6298,6 +6298,18 @@ x86_emulate(
         op_bytes = 16 << evex.lr;
         goto avx512f_no_sae;
 
+    case X86EMUL_OPC_EVEX_F2(5, 0x6f): /* vmovrs{b,w} mem,[xyz]mm{k} */
+        elem_bytes = 1 << evex.w;
+        /* fall through */
+    case X86EMUL_OPC_EVEX_F3(5, 0x6f): /* vmovrs{d,q} mem,[xyz]mm{k} */
+        generate_exception_if(!mode_64bit() || ea.type != OP_MEM || evex.brs,
+                              X86_EXC_UD);
+        vcpu_must_have(avx10, 2);
+        vcpu_must_have(movrs);
+        avx512_vlen_check(false);
+        op_bytes = 16 << evex.lr;
+        goto simd_zmm;
+
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x70): /* vpshldvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x72): /* vpshrdvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
         generate_exception_if(!evex.w, X86_EXC_UD);
As we ignore cacheability aspects of insns, they're treated like simple
VMOVs.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
SDE: -???
---
v3: New.
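For readers less familiar with the encoding: all four EVEX MOVRS forms
share map5 opcode 0x6f. The legacy prefix selects the byte/word pair (F2)
vs the dword/qword pair (F3), EVEX.W picks within each pair, and EVEX.L'L
scales the overall operand size. Below is a minimal, self-contained sketch
of just that mapping; the struct layout and helper name are made up for
illustration and are not the emulator's own.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant EVEX fields (not Xen's layout). */
struct evex_fields {
    unsigned int w:1;  /* EVEX.W */
    unsigned int lr:2; /* EVEX.L'L: 0 = 128-bit, 1 = 256-bit, 2 = 512-bit */
};

/*
 * F2 encodes vmovrsb (W0) / vmovrsw (W1); F3 encodes vmovrsd (W0) /
 * vmovrsq (W1).  This mirrors "elem_bytes = 1 << evex.w" in the F2 case
 * above; for F3 the element size comes from the generic EVEX path.
 */
static unsigned int movrs_elem_bytes(bool pfx_f2, struct evex_fields evex)
{
    return (pfx_f2 ? 1u : 4u) << evex.w;
}

int main(void)
{
    struct evex_fields evex = { .w = 1, .lr = 2 };

    /* vmovrsw with a ZMM destination: 2-byte elements, 64-byte operand. */
    printf("elem=%u op=%u\n", movrs_elem_bytes(true, evex), 16u << evex.lr);

    return 0;
}

Since the read-shared (cacheability) hint is all that distinguishes these
insns from the corresponding VMOVDQU forms, fixing up element and operand
sizes and branching to the existing simd_zmm path covers the emulation.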