@@ -520,6 +520,10 @@ DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32)
DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32)
DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr)
+DEF_HELPER_5(XXBLENDVB, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XXBLENDVH, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XXBLENDVW, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XXBLENDVD, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
@@ -44,6 +44,18 @@
                ...... ..... .... . ................            \
                &8RR_D si=%8rr_si xt=%8rr_xt
+# Format XX4
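+# Each 6-bit VSR number is a split field: 5 bits in the suffix word plus
+# an extension bit (TX/BX/AX/CX) packed into the suffix's low nibble.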
+&XX4            xt xa xb xc
+%xx4_xt         0:1 21:5
+%xx4_xa         2:1 16:5
+%xx4_xb         1:1 11:5
+%xx4_xc         3:1 6:5
+@XX4            ........ ........ ........ ........             \
+                ...... ..... ..... ..... ..... .. ....          \
+                &XX4 xt=%xx4_xt xa=%xx4_xa xb=%xx4_xb xc=%xx4_xc
+
### Fixed-Point Load Instructions
PLBZ            000001 10 0--.-- ..................             \
@@ -175,3 +187,13 @@ XXSPLTIW        000001 01 0000 -- -- ................            \
                100000 ..... 0011 . ................             @8RR_D
XXSPLTI32DX     000001 01 0000 -- -- ................            \
                100000 ..... 000 .. ................             @8RR_D_IX
+
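+# Suffix bits 5:4 select the element size: 00=B, 01=H, 10=W, 11=D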
+XXBLENDVD       000001 01 0000 -- ------------------            \
+                100001 ..... ..... ..... ..... 11 ....           @XX4
+XXBLENDVW       000001 01 0000 -- ------------------            \
+                100001 ..... ..... ..... ..... 10 ....           @XX4
+XXBLENDVH       000001 01 0000 -- ------------------            \
+                100001 ..... ..... ..... ..... 01 ....           @XX4
+XXBLENDVB       000001 01 0000 -- ------------------            \
+                100001 ..... ..... ..... ..... 00 ....           @XX4
@@ -1737,6 +1737,25 @@ void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
*xt = t;
}
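
+/*
+ * XXBLENDV[BHWD]: per element, select from b when the most significant
+ * bit of the corresponding element of c is set, else from a.
+ */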
+#define XXBLEND(name, sz) \
+void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b,  \
+                                 ppc_avr_t *c, uint32_t desc)               \
+{                                                                           \
+    for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) {                  \
+        t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ?               \
+            b->glue(u, sz)[i] : a->glue(u, sz)[i];                          \
+    }                                                                       \
+}
+XXBLEND(B, 8)
+XXBLEND(H, 16)
+XXBLEND(W, 32)
+XXBLEND(D, 64)
+#undef XXBLEND
+
#define VEXT_SIGNED(name, element, cast) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
{ \
@@ -2087,6 +2087,67 @@ TRANS64(PLXV, do_lstxv_PLS_D, false, false)
TRANS64(PSTXVP, do_lstxv_PLS_D, true, true)
TRANS64(PLXVP, do_lstxv_PLS_D, false, true)
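
+/*
+ * XXBLENDV: the sign bit of each element of c selects between the
+ * corresponding elements of b (set) and a (clear); the inline version
+ * replicates the sign bit with sari, then bit-selects b/a with it.
+ */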
+static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+                             TCGv_vec c)
+{
+    TCGv_vec tmp = tcg_temp_new_vec_matching(c);
+    tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
+    tcg_gen_bitsel_vec(vece, t, tmp, b, a);
+    tcg_temp_free_vec(tmp);
+}
+
+static bool do_xxblendv(DisasContext *ctx, arg_XX4 *a, unsigned vece)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, 0
+    };
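+    /* One variant per element size; .fno is the out-of-line fallback */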
+    static const GVecGen4 ops[4] = {
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVB,
+            .opt_opc = vecop_list,
+            .vece = MO_8
+        },
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVH,
+            .opt_opc = vecop_list,
+            .vece = MO_16
+        },
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVW,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = gen_xxblendv_vec,
+            .fno = gen_helper_XXBLENDVD,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        }
+    };
+
+    REQUIRE_VSX(ctx);
+
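+    /* Operate on the whole 16-byte VSR (oprsz = maxsz = 16) */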
+    tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
+                   vsr_full_offset(a->xb), vsr_full_offset(a->xc),
+                   16, 16, &ops[vece]);
+
+    return true;
+}
+
+TRANS(XXBLENDVB, do_xxblendv, MO_8)
+TRANS(XXBLENDVH, do_xxblendv, MO_16)
+TRANS(XXBLENDVW, do_xxblendv, MO_32)
+TRANS(XXBLENDVD, do_xxblendv, MO_64)
+
#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM