@@ -648,18 +648,34 @@ DEF_HELPER_6(vaadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
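+/*
+ * Unsigned averaging add/sub. The _vv helpers take (vd, v0, vs1, vs2)
+ * vector pointers; the _vx helpers replace vs1 with a target_ulong (tl)
+ * scalar.
+ */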
+DEF_HELPER_6(vaaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaaddu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasubu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaaddu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasubu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasubu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasubu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasubu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vsmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
@@ -493,11 +493,14 @@ vssubu_vv 100010 . ..... ..... 000 ..... 1010111 @r_vm
vssubu_vx 100010 . ..... ..... 100 ..... 1010111 @r_vm
vssub_vv 100011 . ..... ..... 000 ..... 1010111 @r_vm
vssub_vx 100011 . ..... ..... 100 ..... 1010111 @r_vm
-vaadd_vv 100100 . ..... ..... 000 ..... 1010111 @r_vm
-vaadd_vx 100100 . ..... ..... 100 ..... 1010111 @r_vm
-vaadd_vi 100100 . ..... ..... 011 ..... 1010111 @r_vm
-vasub_vv 100110 . ..... ..... 000 ..... 1010111 @r_vm
-vasub_vx 100110 . ..... ..... 100 ..... 1010111 @r_vm
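+# RVV 1.0 moves the averaging ops into the OPMVV/OPMVX major spaces:
+# funct6 becomes 0010xx, funct3 is 010 (.vv) / 110 (.vx), and the
+# immediate form vaadd.vi is dropped.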
+vaadd_vv 001001 . ..... ..... 010 ..... 1010111 @r_vm
+vaadd_vx 001001 . ..... ..... 110 ..... 1010111 @r_vm
+vaaddu_vv 001000 . ..... ..... 010 ..... 1010111 @r_vm
+vaaddu_vx 001000 . ..... ..... 110 ..... 1010111 @r_vm
+vasub_vv 001011 . ..... ..... 010 ..... 1010111 @r_vm
+vasub_vx 001011 . ..... ..... 110 ..... 1010111 @r_vm
+vasubu_vv 001010 . ..... ..... 010 ..... 1010111 @r_vm
+vasubu_vx 001010 . ..... ..... 110 ..... 1010111 @r_vm
vsmul_vv 100111 . ..... ..... 000 ..... 1010111 @r_vm
vsmul_vx 100111 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccu_vv 111100 . ..... ..... 000 ..... 1010111 @r_vm
@@ -2004,10 +2004,13 @@ GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)

/* Vector Single-Width Averaging Add and Subtract */
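+/*
+ * The unsigned variants need no new predicates: opivv_check/opivx_check
+ * only validate vtype legality and register alignment, which do not
+ * depend on signedness.
+ */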
GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
+GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
GEN_OPIVV_TRANS(vasub_vv, opivv_check)
+GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
+GEN_OPIVX_TRANS(vaaddu_vx, opivx_check)
GEN_OPIVX_TRANS(vasub_vx, opivx_check)
-GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
+GEN_OPIVX_TRANS(vasubu_vx, opivx_check)

/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
@@ -2295,6 +2295,43 @@ GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2)
GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4)
GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8)

+static inline uint32_t aaddu32(CPURISCVState *env, int vxrm,
+ uint32_t a, uint32_t b)
+{
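+ /*
+ * The 8/16/32-bit unsigned inputs are widened to 64 bits, so the sum
+ * cannot wrap. get_round() returns the rounding increment for the one
+ * bit shifted out, per the vxrm mode: e.g. round-to-nearest-up turns
+ * 3 + 4 = 7 into (7 >> 1) + 1 = 4.
+ */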
+ uint64_t res = (uint64_t)a + b;
+ uint8_t round = get_round(vxrm, res, 1);
+
+ return (res >> 1) + round;
+}
+
+static inline uint64_t aaddu64(CPURISCVState *env, int vxrm,
+ uint64_t a, uint64_t b)
+{
+ uint64_t res = a + b;
+ uint8_t round = get_round(vxrm, res, 1);
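+ /*
+ * A carry out of the 64-bit add shows up as res < a; it re-enters as
+ * bit 63 of the result once the sum has been shifted right by one.
+ */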
+ uint64_t over = (uint64_t)(res < a) << 63;
+
+ return ((res >> 1) | over) + round;
+}
+
+RVVCALL(OPIVV2_RM, vaaddu_vv_b, OP_UUU_B, H1, H1, H1, aaddu32)
+RVVCALL(OPIVV2_RM, vaaddu_vv_h, OP_UUU_H, H2, H2, H2, aaddu32)
+RVVCALL(OPIVV2_RM, vaaddu_vv_w, OP_UUU_W, H4, H4, H4, aaddu32)
+RVVCALL(OPIVV2_RM, vaaddu_vv_d, OP_UUU_D, H8, H8, H8, aaddu64)
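+/*
+ * RVVCALL instantiates the per-element do_<name> wrappers; GEN_VEXT_VV_RM
+ * emits the helper that walks the active elements and dispatches on the
+ * vxrm rounding mode. The numeric arguments are the source and destination
+ * element sizes in bytes.
+ */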
+GEN_VEXT_VV_RM(vaaddu_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vaaddu_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vaaddu_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vaaddu_vv_d, 8, 8)
+
+RVVCALL(OPIVX2_RM, vaaddu_vx_b, OP_UUU_B, H1, H1, aaddu32)
+RVVCALL(OPIVX2_RM, vaaddu_vx_h, OP_UUU_H, H2, H2, aaddu32)
+RVVCALL(OPIVX2_RM, vaaddu_vx_w, OP_UUU_W, H4, H4, aaddu32)
+RVVCALL(OPIVX2_RM, vaaddu_vx_d, OP_UUU_D, H8, H8, aaddu64)
+GEN_VEXT_VX_RM(vaaddu_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vaaddu_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vaaddu_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vaaddu_vx_d, 8, 8)
+
static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
int64_t res = (int64_t)a - b;
@@ -2331,6 +2368,43 @@ GEN_VEXT_VX_RM(vasub_vx_h, 2, 2)
GEN_VEXT_VX_RM(vasub_vx_w, 4, 4)
GEN_VEXT_VX_RM(vasub_vx_d, 8, 8)

+static inline uint32_t asubu32(CPURISCVState *env, int vxrm,
+ uint32_t a, uint32_t b)
+{
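+ /*
+ * The difference is formed in int64_t so an unsigned borrow produces a
+ * genuinely negative value; the arithmetic right shift preserves its
+ * sign, and the OP_UUU_* glue truncates back to the element width.
+ */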
+ int64_t res = (int64_t)a - b;
+ uint8_t round = get_round(vxrm, res, 1);
+
+ return (res >> 1) + round;
+}
+
+static inline uint64_t asubu64(CPURISCVState *env, int vxrm,
+ uint64_t a, uint64_t b)
+{
+ uint64_t res = (uint64_t)a - b;
+ uint8_t round = get_round(vxrm, res, 1);
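+ /*
+ * res > a means the subtraction borrowed, i.e. the true difference is
+ * negative; propagate that sign into bit 63 after the shift.
+ */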
+ uint64_t over = (uint64_t)(res > a) << 63;
+
+ return ((res >> 1) | over) + round;
+}
+
+RVVCALL(OPIVV2_RM, vasubu_vv_b, OP_UUU_B, H1, H1, H1, asubu32)
+RVVCALL(OPIVV2_RM, vasubu_vv_h, OP_UUU_H, H2, H2, H2, asubu32)
+RVVCALL(OPIVV2_RM, vasubu_vv_w, OP_UUU_W, H4, H4, H4, asubu32)
+RVVCALL(OPIVV2_RM, vasubu_vv_d, OP_UUU_D, H8, H8, H8, asubu64)
+GEN_VEXT_VV_RM(vasubu_vv_b, 1, 1)
+GEN_VEXT_VV_RM(vasubu_vv_h, 2, 2)
+GEN_VEXT_VV_RM(vasubu_vv_w, 4, 4)
+GEN_VEXT_VV_RM(vasubu_vv_d, 8, 8)
+
+RVVCALL(OPIVX2_RM, vasubu_vx_b, OP_UUU_B, H1, H1, asubu32)
+RVVCALL(OPIVX2_RM, vasubu_vx_h, OP_UUU_H, H2, H2, asubu32)
+RVVCALL(OPIVX2_RM, vasubu_vx_w, OP_UUU_W, H4, H4, asubu32)
+RVVCALL(OPIVX2_RM, vasubu_vx_d, OP_UUU_D, H8, H8, asubu64)
+GEN_VEXT_VX_RM(vasubu_vx_b, 1, 1)
+GEN_VEXT_VX_RM(vasubu_vx_h, 2, 2)
+GEN_VEXT_VX_RM(vasubu_vx_w, 4, 4)
+GEN_VEXT_VX_RM(vasubu_vx_d, 8, 8)
+
/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{