@@ -718,6 +718,11 @@ static const struct test avx10_2_all[] =
INSN(comxsh, f2, map5, 2f, el, fp16, el),
INSN(comxss, f2, 0f, 2f, el, d, el),
INSN(dpphps, , 0f38, 52, vl, d, vl),
+ INSN(minmax, 66, 0f3a, 52, vl, sd, vl),
+ INSN(minmax, 66, 0f3a, 53, el, sd, el),
+ INSN(minmaxpbf16, f2, 0f3a, 52, vl, bf16, vl),
+ INSN(minmaxph, , 0f3a, 52, vl, fp16, vl),
+ INSN(minmaxsh, , 0f3a, 53, el, fp16, el),
INSN(mpsadbw, f3, 0f3a, 42, vl, d_nb, vl),
INSN(pdpbssd, f2, 0f38, 50, vl, d, vl),
INSN(pdpbssds, f2, 0f38, 51, vl, d, vl),
@@ -2091,6 +2091,11 @@ static const struct evex {
{ { 0x44 }, 3, T, R, pfx_66, WIG, Ln }, /* vpclmulqdq */
{ { 0x50 }, 3, T, R, pfx_66, Wn, Ln }, /* vrangep{s,d} */
{ { 0x51 }, 3, T, R, pfx_66, Wn, LIG }, /* vranges{s,d} */
+ { { 0x52 }, 3, T, R, pfx_no, W0, Ln }, /* vminmaxph */
+ { { 0x52 }, 3, T, R, pfx_66, Wn, Ln }, /* vminmaxp{s,d} */
+ { { 0x52 }, 3, T, R, pfx_f2, W0, Ln }, /* vminmaxpbf16 */
+ { { 0x53 }, 3, T, R, pfx_no, W0, LIG }, /* vminmaxsh */
+ { { 0x53 }, 3, T, R, pfx_66, Wn, LIG }, /* vminmaxs{s,d} */
{ { 0x54 }, 3, T, R, pfx_66, Wn, Ln }, /* vfixupimmp{s,d} */
{ { 0x55 }, 3, T, R, pfx_66, Wn, LIG }, /* vfixupimms{s,d} */
{ { 0x56 }, 3, T, R, pfx_no, W0, Ln }, /* vreduceph */
@@ -499,6 +499,8 @@ static const struct ext0f3a_table {
[0x4c] = { .simd_size = simd_packed_int, .four_op = 1 },
[0x50] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
[0x51] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
+ [0x52] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
+ [0x53] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
[0x54] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
[0x55] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
[0x56] = { .simd_size = simd_packed_fp, .two_op = 1, .d8s = d8s_vl },
@@ -1474,6 +1476,7 @@ int x86emul_decode(struct x86_emulate_st
case 0x0a: /* vrndscalesh */
case 0x26: /* vfpclassph */
case 0x27: /* vfpclasssh */
+ case 0x53: /* vminmaxsh */
case 0x56: /* vgetmantph */
case 0x57: /* vgetmantsh */
case 0x66: /* vreduceph */
@@ -1482,6 +1485,11 @@ int x86emul_decode(struct x86_emulate_st
s->fp16 = true;
break;

+ case 0x52: /* vminmaxp{h,bf16} */
+ if ( !s->evex.pfx || s->evex.pfx == vex_f2 )
+ s->fp16 = true;
+ break;
+
case 0xc2: /* vpcmp{p,s}h */
if ( !(s->evex.pfx & VEX_PREFIX_DOUBLE_MASK) )
s->fp16 = true;
@@ -7716,6 +7716,21 @@ x86_emulate(
generate_exception_if(vex.w, X86_EXC_UD);
goto simd_0f_int_imm8;

+ case X86EMUL_OPC_EVEX_F2(0x0f3a, 0x52): /* vminmaxpbf16 $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+ generate_exception_if(ea.type != OP_MEM && evex.brs, X86_EXC_UD);
+ op_bytes = 16 << evex.lr;
+ /* fall through */
+ case X86EMUL_OPC_EVEX(0x0f3a, 0x52): /* vminmaxph $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+ case X86EMUL_OPC_EVEX(0x0f3a, 0x53): /* vminmaxsh $imm8,xmm/m16,xmm,xmm{k} */
+ generate_exception_if(vex.w, X86_EXC_UD);
+ /* fall through */
+ case X86EMUL_OPC_EVEX_66(0x0f3a, 0x52): /* vminmaxp{s,d} $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+ case X86EMUL_OPC_EVEX_66(0x0f3a, 0x53): /* vminmaxs{s,d} $imm8,xmm/mem,xmm,xmm{k} */
+ vcpu_must_have(avx10, 2);
+ if ( ea.type != OP_REG || !evex.brs )
+ avx512_vlen_check(b & 1);
+ goto simd_imm8_zmm;
+
case X86EMUL_OPC_VEX_66(0x0f3a, 0x5c): /* vfmaddsubps {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */
/* vfmaddsubps {x,y}mm/mem,{x,y}mm,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f3a, 0x5d): /* vfmaddsubpd {x,y}mm,{x,y}mm/mem,{x,y}mm,{x,y}mm */
While they use new major opcodes, they are still pretty similar to
various existing insns.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
Spec rev 002 says VMINMAXNEPBF16, yet that's going to change to
VMINMAXPBF16.
---
SDE: ???
---
v3: New.
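
For reference, a minimal standalone sketch of the EVEX Disp8*N scaling
behind the d8s_vl / d8s_dq attributes added to the 0f3a table above:
the packed forms (opcode 0x52) scale the compressed displacement by the
full vector width, while the scalar forms (opcode 0x53) scale by the
element size alone. The helpers below are illustrative only and not
part of the emulator; their names and parameters are made up for this
sketch.

#include <stdbool.h>
#include <stdint.h>

/*
 * d8s_vl (packed, e.g. vminmaxp{s,d,h,bf16}): N is the vector width
 * selected by EVEX.L'L (16/32/64 bytes), except that a broadcast
 * memory operand is scaled by the element size instead.
 */
static int32_t disp8_scale_vl(int8_t disp8, unsigned int evex_lr,
                              unsigned int elem_bytes, bool broadcast)
{
    unsigned int n = broadcast ? elem_bytes : 16u << evex_lr;

    return disp8 * (int32_t)n;
}

/*
 * d8s_dq (scalar, e.g. vminmaxs{s,d,h}): N is just the element size
 * (2 for fp16, 4 or 8 depending on EVEX.W), independent of EVEX.L'L.
 */
static int32_t disp8_scale_scalar(int8_t disp8, unsigned int elem_bytes)
{
    return disp8 * (int32_t)elem_bytes;
}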