@@ -379,18 +379,18 @@ DEF_HELPER_6(vsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vnsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vnsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vnsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vnsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vnsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vnsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vnsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vnsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vnsrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vnsra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vnsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vnsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsrl_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsrl_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsrl_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsra_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsra_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsra_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsrl_wx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsrl_wx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsrl_wx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsra_wx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsra_wx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsra_wx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vmseq_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmseq_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
@@ -352,12 +352,12 @@ vsrl_vi 101000 . ..... ..... 011 ..... 1010111 @r_vm
vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm
vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm
vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm
-vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm
-vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm
-vnsrl_vi 101100 . ..... ..... 011 ..... 1010111 @r_vm
-vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm
-vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm
-vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm
+vnsrl_wv 101100 . ..... ..... 000 ..... 1010111 @r_vm
+vnsrl_wx 101100 . ..... ..... 100 ..... 1010111 @r_vm
+vnsrl_wi 101100 . ..... ..... 011 ..... 1010111 @r_vm
+vnsra_wv 101101 . ..... ..... 000 ..... 1010111 @r_vm
+vnsra_wx 101101 . ..... ..... 100 ..... 1010111 @r_vm
+vnsra_wi 101101 . ..... ..... 011 ..... 1010111 @r_vm
vmseq_vv 011000 . ..... ..... 000 ..... 1010111 @r_vm
vmseq_vx 011000 . ..... ..... 100 ..... 1010111 @r_vm
vmseq_vi 011000 . ..... ..... 011 ..... 1010111 @r_vm
@@ -1952,7 +1952,7 @@ GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)
/* Vector Narrowing Integer Right Shift Instructions */
-static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
+static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
@@ -1960,10 +1960,10 @@ static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
}
-/* OPIVV with NARROW */
+/* OPIWV with NARROW */
-#define GEN_OPIVV_NARROW_TRANS(NAME) \
+#define GEN_OPIWV_NARROW_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
- if (opivv_narrow_check(s, a)) { \
+ if (opiwv_narrow_check(s, a)) { \
uint32_t data = 0; \
static gen_helper_gvec_4_ptr * const fns[3] = { \
gen_helper_##NAME##_b, \
@@ -1987,10 +1987,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
} \
return false; \
}
-GEN_OPIVV_NARROW_TRANS(vnsra_vv)
-GEN_OPIVV_NARROW_TRANS(vnsrl_vv)
+GEN_OPIWV_NARROW_TRANS(vnsra_wv)
+GEN_OPIWV_NARROW_TRANS(vnsrl_wv)
-static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
+static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
@@ -1998,10 +1998,10 @@ static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
}
-/* OPIVX with NARROW */
+/* OPIWX with NARROW */
-#define GEN_OPIVX_NARROW_TRANS(NAME) \
+#define GEN_OPIWX_NARROW_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
- if (opivx_narrow_check(s, a)) { \
+ if (opiwx_narrow_check(s, a)) { \
static gen_helper_opivx * const fns[3] = { \
gen_helper_##NAME##_b, \
gen_helper_##NAME##_h, \
@@ -2012,14 +2012,14 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
return false; \
}
-GEN_OPIVX_NARROW_TRANS(vnsra_vx)
-GEN_OPIVX_NARROW_TRANS(vnsrl_vx)
+GEN_OPIWX_NARROW_TRANS(vnsra_wx)
+GEN_OPIWX_NARROW_TRANS(vnsrl_wx)
-/* OPIVI with NARROW */
-#define GEN_OPIVI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
+/* OPIWI with NARROW */
+#define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
- if (opivx_narrow_check(s, a)) { \
+ if (opiwx_narrow_check(s, a)) { \
static gen_helper_opivx * const fns[3] = { \
gen_helper_##OPIVX##_b, \
gen_helper_##OPIVX##_h, \
@@ -2031,8 +2031,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
return false; \
}
-GEN_OPIVI_NARROW_TRANS(vnsra_vi, IMM_ZX, vnsra_vx)
-GEN_OPIVI_NARROW_TRANS(vnsrl_vi, IMM_ZX, vnsrl_vx)
+GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
+GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)
/* Vector Integer Comparison Instructions */
/*
@@ -1515,18 +1515,18 @@ GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
/* Vector Narrowing Integer Right Shift Instructions */
-GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
-GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
-GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
-GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
-GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
-GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VV(vnsrl_wv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VV(vnsrl_wv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VV(vnsrl_wv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VV(vnsra_wv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VV(vnsra_wv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VV(vnsra_wv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VX(vnsrl_wx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VX(vnsrl_wx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VX(vnsrl_wx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VX(vnsra_wx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VX(vnsra_wx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VX(vnsra_wx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
/* Vector Integer Comparison Instructions */
#define DO_MSEQ(N, M) (N == M)
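
For reference only (not part of the patch), a minimal standalone C sketch of the per-element operation that GEN_VEXT_SHIFT_VV(vnsrl_wv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh) expands to: each 2*SEW-wide element of vs2 is logically shifted right by the corresponding vs1 element, with the shift amount masked to the wider operand width (here 0x1f), then truncated to SEW. Details handled by the real helper, such as predication under the v0 mask and tail clearing, are omitted, and the function name below is hypothetical.

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative model of vnsrl.wv with SEW=16 (vnsrl_wv_h): a 32-bit
 * (2*SEW) source element is shifted right logically and narrowed to
 * 16 bits.  Mask/tail handling from the real QEMU helper is left out.
 */
void vnsrl_wv_h_ref(uint16_t *vd, const uint16_t *vs1,
                    const uint32_t *vs2, size_t vl)
{
    for (size_t i = 0; i < vl; i++) {
        /* shift amount is masked to the wider source width (0x1f) */
        vd[i] = (uint16_t)(vs2[i] >> (vs1[i] & 0x1f));
    }
}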