@@ -194,9 +194,9 @@ DEF_HELPER_FLAGS_3(vsro, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(vsrv, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(vslv, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_4(VADDCUW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
-DEF_HELPER_FLAGS_2(vprtybw, TCG_CALL_NO_RWG, void, avr, avr)
-DEF_HELPER_FLAGS_2(vprtybd, TCG_CALL_NO_RWG, void, avr, avr)
-DEF_HELPER_FLAGS_2(vprtybq, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_3(VPRTYBW, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_FLAGS_3(VPRTYBD, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_FLAGS_3(VPRTYBQ, TCG_CALL_NO_RWG, void, avr, avr, i32)
DEF_HELPER_FLAGS_4(VSUBCUW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
@@ -529,6 +529,10 @@ VCTZDM 000100 ..... ..... ..... 11111000100 @VX
VPDEPD 000100 ..... ..... ..... 10111001101 @VX
VPEXTD 000100 ..... ..... ..... 10110001101 @VX
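+# VPRTYB[WDQ] share the same extended opcode; the five bits between VRT and
+# VRB select the variant (8 = word, 9 = doubleword, 10 = quadword), so the
+# @VX_tb format extracts only VRT and VRB as register operands.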
+VPRTYBD 000100 ..... 01001 ..... 11000000010 @VX_tb
+VPRTYBQ 000100 ..... 01010 ..... 11000000010 @VX_tb
+VPRTYBW 000100 ..... 01000 ..... 11000000010 @VX_tb
+
## Vector Permute and Formatting Instruction

VEXTDUBVLX 000100 ..... ..... ..... ..... 011000 @VA
@@ -502,7 +502,7 @@ void helper_VADDCUW(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t v)
}

/* vprtybw */
-void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b)
+void helper_VPRTYBW(ppc_avr_t *r, ppc_avr_t *b, uint32_t v)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
@@ -513,7 +513,7 @@ void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b)
}

/* vprtybd */
-void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b)
+void helper_VPRTYBD(ppc_avr_t *r, ppc_avr_t *b, uint32_t v)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
@@ -525,7 +525,7 @@ void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b)
}

/* vprtybq */
-void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
+void helper_VPRTYBQ(ppc_avr_t *r, ppc_avr_t *b, uint32_t v)
{
    uint64_t res = b->u64[0] ^ b->u64[1];
    res ^= res >> 32;
@@ -1659,9 +1659,58 @@ GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
-GEN_VXFORM_NOA(vprtybw, 1, 24);
-GEN_VXFORM_NOA(vprtybd, 1, 24);
-GEN_VXFORM_NOA(vprtybq, 1, 24);
+
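+/*
+ * XOR-fold the top half of each element onto its bottom half until the low
+ * byte holds the XOR of all bytes of the element, then AND with 1 so that
+ * only the parity of the bytes' least-significant bits is kept.
+ */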
+static void gen_vprtyb(unsigned vece, TCGv_vec t, TCGv_vec b)
+{
+    int i;
+    TCGv_vec tmp = tcg_temp_new_vec_matching(b);
+    /* MO_32 is 2, so 2 iterations for MO_32 and 3 for MO_64 */
+    for (i = 0; i < vece; i++) {
+        tcg_gen_shri_vec(vece, tmp, b, (4 << (vece - i)));
+        tcg_gen_xor_vec(vece, b, tmp, b);
+    }
+    tcg_gen_dupi_vec(vece, tmp, 1);
+    tcg_gen_and_vec(vece, t, b, tmp);
+    tcg_temp_free_vec(tmp);
+}
+
+static bool do_vx_vprtyb(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
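+    /*
+     * The logical ops (xor, and) used by gen_vprtyb are always available as
+     * TCG vector ops; the right shift is optional, so it is listed here for
+     * gvec to check host support before using the inline expansion.
+     */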
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, 0
+    };
+
+    static const GVecGen2 op[] = {
+        {
+            .fniv = gen_vprtyb,
+            .fno = gen_helper_VPRTYBW,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = gen_vprtyb,
+            .fno = gen_helper_VPRTYBD,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
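+        /*
+         * There is no 128-bit vector element size, so the quadword variant
+         * has no inline (.fniv) expansion and always calls the helper.
+         */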
+        {
+            .fno = gen_helper_VPRTYBQ,
+            .vece = MO_128
+        },
+    };
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+    REQUIRE_VECTOR(ctx);
+
+    tcg_gen_gvec_2(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
+                   16, 16, &op[vece - MO_32]);
+
+    return true;
+}
+
+TRANS(VPRTYBW, do_vx_vprtyb, MO_32)
+TRANS(VPRTYBD, do_vx_vprtyb, MO_64)
+TRANS(VPRTYBQ, do_vx_vprtyb, MO_128)

static void gen_vsplt(DisasContext *ctx, int vece)
{
@@ -106,9 +106,6 @@ GEN_VXFORM_300(vsrv, 2, 28),
GEN_VXFORM_300(vslv, 2, 29),
GEN_VXFORM(vslo, 6, 16),
GEN_VXFORM(vsro, 6, 17),
-GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300),
GEN_VXFORM(xpnd04_1, 0, 22),
GEN_VXFORM_300(bcdsr, 0, 23),