@@ -786,3 +786,10 @@ XVF64GERPP 111011 ... -- .... 0 ..... 00111010 ..- @XX3_at xa=%xx_xa_pair
XVF64GERPN 111011 ... -- .... 0 ..... 10111010 ..- @XX3_at xa=%xx_xa_pair
XVF64GERNP 111011 ... -- .... 0 ..... 01111010 ..- @XX3_at xa=%xx_xa_pair
XVF64GERNN 111011 ... -- .... 0 ..... 11111010 ..- @XX3_at xa=%xx_xa_pair
+
+## Vector Division Instructions (Power ISA v3.1)
+
+VDIVSW 000100 ..... ..... ..... 00110001011 @VX   # signed word divide
+VDIVUW 000100 ..... ..... ..... 00010001011 @VX   # unsigned word divide
+VDIVSD 000100 ..... ..... ..... 00111001011 @VX   # signed doubleword divide
+VDIVUD 000100 ..... ..... ..... 00011001011 @VX   # unsigned doubleword divide
@@ -3238,6 +3238,91 @@ TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)
+static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
+                         void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
+                         void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
+{
+    const GVecGen3 op = {
+        .fni4 = func_32,    /* per-element generator for 32-bit lanes */
+        .fni8 = func_64,    /* per-element generator for 64-bit lanes */
+        .vece = vece        /* element size: MO_32 or MO_64 */
+    };
+    /* Single expansion path shared by the vector divide/modulo insns. */
+    REQUIRE_VECTOR(ctx);    /* bail out unless the vector unit is usable */
+
+    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+                   avr_full_offset(a->vrb), 16, 16, &op); /* full 16-byte VRs */
+
+    return true;
+}
+
+#define DIVU32(NAME, DIV) /* unsigned 32-bit divide, div-by-0 guarded */      \
+static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                          \
+{                                                                             \
+    TCGv_i32 zero = tcg_constant_i32(0);                                      \
+    TCGv_i32 one = tcg_constant_i32(1);                                       \
+    tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b); /* b==0 -> b=1 */   \
+    DIV(t, a, b); /* ISA leaves t undefined when b was 0; here t = a */       \
+}
+
+#define DIVS32(NAME, DIV) /* signed 32-bit divide, host traps avoided */      \
+static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                          \
+{                                                                             \
+    TCGv_i32 t0 = tcg_temp_new_i32();                                         \
+    TCGv_i32 t1 = tcg_temp_new_i32();                                         \
+    tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN); /* t0 = a==MIN */    \
+    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1);        /* t1 = b==-1 */     \
+    tcg_gen_and_i32(t0, t0, t1);              /* t0 = overflow (MIN / -1) */  \
+    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0);         /* t1 = b==0 */      \
+    tcg_gen_or_i32(t0, t0, t1);               /* t0 = overflow || div-by-0 */ \
+    tcg_gen_movi_i32(t1, 0);                                                  \
+    tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b); /* bad case: b = 1 */ \
+    DIV(t, a, b); /* ISA leaves t undefined in the guarded cases */           \
+    tcg_temp_free_i32(t0);                                                    \
+    tcg_temp_free_i32(t1);                                                    \
+}
+
+#define DIVU64(NAME, DIV) /* unsigned 64-bit divide, div-by-0 guarded */      \
+static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                          \
+{                                                                             \
+    TCGv_i64 zero = tcg_constant_i64(0);                                      \
+    TCGv_i64 one = tcg_constant_i64(1);                                       \
+    tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b); /* b==0 -> b=1 */   \
+    DIV(t, a, b); /* ISA leaves t undefined when b was 0; here t = a */       \
+}
+
+#define DIVS64(NAME, DIV) /* signed 64-bit divide, host traps avoided */      \
+static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                          \
+{                                                                             \
+    TCGv_i64 t0 = tcg_temp_new_i64();                                         \
+    TCGv_i64 t1 = tcg_temp_new_i64();                                         \
+    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN); /* t0 = a==MIN */    \
+    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1);        /* t1 = b==-1 */     \
+    tcg_gen_and_i64(t0, t0, t1);              /* t0 = overflow (MIN / -1) */  \
+    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0);         /* t1 = b==0 */      \
+    tcg_gen_or_i64(t0, t0, t1);               /* t0 = overflow || div-by-0 */ \
+    tcg_gen_movi_i64(t1, 0);                                                  \
+    tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b); /* bad case: b = 1 */ \
+    DIV(t, a, b); /* ISA leaves t undefined in the guarded cases */           \
+    tcg_temp_free_i64(t0);                                                    \
+    tcg_temp_free_i64(t1);                                                    \
+}
+
+DIVS32(do_divsw, tcg_gen_div_i32)   /* per-lane generators for fni4/fni8 */
+DIVU32(do_divuw, tcg_gen_divu_i32)
+DIVS64(do_divsd, tcg_gen_div_i64)
+DIVU64(do_divud, tcg_gen_divu_i64)
+
+TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL) /* v3.1 */
+TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
+TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
+TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
+
+#undef DIVS32   /* generator macros are local to this section */
+#undef DIVU32
+#undef DIVS64
+#undef DIVU64
+
#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE