@@ -108,41 +108,38 @@ DEF_HELPER_6(vsse8_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse16_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse32_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse64_v, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxhu_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxhu_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxhu_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxwu_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxwu_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32)
@@ -257,18 +257,17 @@ vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
-vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
-vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
-vlxw_v ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm
-vlxe_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm
-vlxbu_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
-vlxhu_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
-vlxwu_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm
+# Vector indexed load insns.
+vlxei8_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
+vlxei16_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
+vlxei32_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm
+vlxei64_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm
+
# Vector ordered-indexed and unordered-indexed store insns.
-vsxb_v ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm
-vsxh_v ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm
-vsxw_v ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm
-vsxe_v ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm
+vsxei8_v ... 0-1 . ..... ..... 000 ..... 0100111 @r_nfvm
+vsxei16_v ... 0-1 . ..... ..... 101 ..... 0100111 @r_nfvm
+vsxei32_v ... 0-1 . ..... ..... 110 ..... 0100111 @r_nfvm
+vsxei64_v ... 0-1 . ..... ..... 111 ..... 0100111 @r_nfvm
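
The width field in these patterns selects the offset EEW, and the mapping is deliberately non-contiguous (000, 101, 110, 111). A small C table makes the mapping explicit; the array name is illustrative, not part of the patch:

/* width[2:0] -> offset EEW, per the decode patterns above;
 * other width values are not used by these patterns. */
static const int eew_of_width[8] = {
    [0] = 8, [5] = 16, [6] = 32, [7] = 64,
};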
#*** Vector AMO operations are encoded under the standard AMO major opcode ***
vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
@@ -202,11 +202,72 @@ static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
}
-static bool vext_check_isa_ill(DisasContext *s)
+/*
+ * Vector indexed, indexed segment store check function.
+ *
+ * Rules to be checked here:
+ * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
+ * 2. The index vector register number must be a multiple of EMUL.
+ *    (Section 3.3.2, 7.3)
+ * 3. The destination vector register number must be a multiple of LMUL.
+ *    (Section 3.3.2, 7.3)
+ * 4. The EMUL setting must be such that EMUL * NFIELDS <= 8. (Section 7.8)
+ * 5. Vector register numbers accessed by the segment load or store
+ * cannot increment past 31. (Section 7.8)
+ */
+static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
+ uint8_t eew)
{
- return !s->vill;
+ float emul = (float)eew / (1 << (s->sew + 3)) * s->flmul;
+ uint32_t flmul_r = s->flmul < 1 ? 1 : s->flmul;
+ return (emul >= 0.125 && emul <= 8) &&
+ require_align(vs2, emul) &&
+ require_align(vd, s->flmul) &&
+ ((nf * flmul_r) <= (NVPR / 4) &&
+ (vd + nf * flmul_r) <= NVPR);
}
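
As a worked example of the EMUL computation in vext_check_st_index, here is a minimal standalone sketch; the instruction choice and variable names are illustrative:

#include <stdio.h>

/* vlxei16.v executed with SEW = 32, LMUL = 2:
 * EMUL = EEW / SEW * LMUL = 16.0 / 32 * 2 = 1.0, which lies in
 * [1/8, 8]; the offset group then occupies EMUL = 1 register
 * while the data group occupies LMUL = 2. */
int main(void)
{
    int eew = 16, sew = 32;
    float lmul = 2.0f;
    float emul = (float)eew / sew * lmul;
    printf("EMUL = %.3f, valid = %d\n", emul,
           emul >= 0.125f && emul <= 8.0f);
    return 0;
}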
+/*
+ * Vector indexed, indexed segment load check function.
+ *
+ * Rules to be checked here:
+ * 1. All rules that apply to store instructions also apply
+ *    to load instructions.
+ * 2. Destination vector register group for a masked vector
+ * instruction cannot overlap the source mask register (v0).
+ * (Section 5.3)
+ * 3. Destination vector register cannot overlap a source vector
+ * register (vs2) group.
+ * (Section 5.2)
+ * 4. Destination vector register groups cannot overlap
+ * the source vector register (vs2) group for
+ * indexed segment load instructions. (Section 7.8.3)
+ */
+static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
+ int nf, int vm, uint8_t eew)
+{
+ float emul = (float)eew / (1 << (s->sew + 3)) * s->flmul;
+ bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
+ require_vm(vm, vd);
+ if (eew > (1 << (s->sew + 3))) {
+ if (vd != vs2) {
+ ret &= require_noover(vd, s->flmul, vs2, emul);
+ }
+ } else if (eew < (1 << (s->sew + 3))) {
+ if (emul < 1) {
+ ret &= require_noover(vd, s->flmul, vs2, emul);
+ } else {
+ ret &= require_noover_widen(vd, s->flmul, vs2, emul);
+ }
+ }
+ if (nf > 1) {
+ ret &= (require_noover(vd, s->flmul, vs2, emul) &&
+ require_noover(vd, nf, vs2, 1));
+ }
+ return ret;
+}
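
To make the overlap rules concrete, below is a minimal sketch of a register-group overlap test; this is an assumption about the behaviour behind require_noover, not QEMU's actual implementation:

#include <stdbool.h>

/* Hypothetical overlap test: two register groups overlap when their
 * register-number ranges intersect; a fractional multiplier still
 * occupies one whole register. */
static bool groups_overlap(int r1, float m1, int r2, float m2)
{
    int n1 = m1 < 1 ? 1 : (int)m1;  /* registers in group 1 */
    int n2 = m2 < 1 ? 1 : (int)m2;  /* registers in group 2 */
    return r1 < r2 + n2 && r2 < r1 + n1;
}

/* e.g. vlxei8.v v4, (a0), v6 with SEW = 32, LMUL = 4: EMUL = 1, so
 * the data group v4..v7 overlaps the offset register v6 and the
 * check must fail. */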
+
/*
* Check function for vector instruction with format:
* single-width result and single-width sources (SEW = SEW op SEW)
@@ -748,27 +809,34 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_index *fn;
- static gen_helper_ldst_index * const fns[7][4] = {
- { gen_helper_vlxb_v_b, gen_helper_vlxb_v_h,
- gen_helper_vlxb_v_w, gen_helper_vlxb_v_d },
- { NULL, gen_helper_vlxh_v_h,
- gen_helper_vlxh_v_w, gen_helper_vlxh_v_d },
- { NULL, NULL,
- gen_helper_vlxw_v_w, gen_helper_vlxw_v_d },
- { gen_helper_vlxe_v_b, gen_helper_vlxe_v_h,
- gen_helper_vlxe_v_w, gen_helper_vlxe_v_d },
- { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
- gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
- { NULL, gen_helper_vlxhu_v_h,
- gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
- { NULL, NULL,
- gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
+ static gen_helper_ldst_index * const fns[4][4] = {
+ /*
+ * offset vector register group EEW = 8,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
+ gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
+ /*
+ * offset vector register group EEW = 16,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
+ gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
+ /*
+ * offset vector register group EEW = 32,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
+ gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
+ /*
+ * offset vector register group EEW = 64,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
+ gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
};
- fn = fns[seq][s->sew];
- if (fn == NULL) {
- return false;
- }
+ fn = fns[seq][s->sew];
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
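
The two table indices are independent: seq selects the offset EEW and s->sew selects the data EEW. A tiny standalone sketch of the lookup (names illustrative):

#include <stdio.h>

/* fns[seq][s->sew]: offset EEW = 8 << seq, data EEW = 8 << s->sew. */
int main(void)
{
    int seq = 1, sew = 2;   /* vlxei16.v executed with SEW = 32 */
    printf("offset EEW = %d, data EEW = %d -> gen_helper_vlxei%d_%d_v\n",
           8 << seq, 8 << sew, 8 << seq, 8 << sew);
    return 0;
}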
@@ -776,50 +844,50 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
-/*
- * For vector indexed segment loads, the destination vector register
- * groups cannot overlap the source vector register group (specified by
- * `vs2`), else an illegal instruction exception is raised.
- */
-static bool ld_index_check(DisasContext *s, arg_rnfvm* a)
+static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
- return (vext_check_isa_ill(s) &&
- vext_check_overlap_mask(s, a->rd, a->vm, false) &&
- vext_check_reg(s, a->rd, false) &&
- vext_check_reg(s, a->rs2, false) &&
- vext_check_nf(s, a->nf) &&
- ((a->nf == 1) ||
- vext_check_overlap_group(a->rd, a->nf << s->lmul,
- a->rs2, 1 << s->lmul)));
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
}
-GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei8_v, 8, 0, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei16_v, 16, 1, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei32_v, 32, 2, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei64_v, 64, 3, rnfvm, ld_index_op, ld_index_check)
static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_index *fn;
static gen_helper_ldst_index * const fns[4][4] = {
- { gen_helper_vsxb_v_b, gen_helper_vsxb_v_h,
- gen_helper_vsxb_v_w, gen_helper_vsxb_v_d },
- { NULL, gen_helper_vsxh_v_h,
- gen_helper_vsxh_v_w, gen_helper_vsxh_v_d },
- { NULL, NULL,
- gen_helper_vsxw_v_w, gen_helper_vsxw_v_d },
- { gen_helper_vsxe_v_b, gen_helper_vsxe_v_h,
- gen_helper_vsxe_v_w, gen_helper_vsxe_v_d }
+ /*
+ * offset vector register group EEW = 8,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
+ gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
+ /*
+ * offset vector register group EEW = 16,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
+ gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
+ /*
+ * offset vector register group EEW = 32,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
+ gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
+ /*
+ * offset vector register group EEW = 64,
+ * data vector register group EEW = SEW
+ */
+ { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
+ gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
};
- fn = fns[seq][s->sew];
- if (fn == NULL) {
- return false;
- }
+ fn = fns[seq][s->sew];
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
@@ -827,18 +895,17 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}
-static bool st_index_check(DisasContext *s, arg_rnfvm* a)
+static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
- return (vext_check_isa_ill(s) &&
- vext_check_reg(s, a->rd, false) &&
- vext_check_reg(s, a->rs2, false) &&
- vext_check_nf(s, a->nf));
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
}
-GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
-GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
-GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
-GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei8_v, 8, 0, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei16_v, 16, 1, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei32_v, 32, 2, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei64_v, 64, 3, rnfvm, st_index_op, st_index_check)
/*
*** unit stride fault-only-first load
@@ -374,8 +374,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
void *vs2, CPURISCVState *env, uint32_t desc,
vext_get_index_addr get_index_addr,
vext_ldst_elem_fn *ldst_elem,
- uint32_t esz, uint32_t msz, uintptr_t ra,
- MMUAccessType access_type)
+ uint32_t esz, uintptr_t ra, MMUAccessType access_type)
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
@@ -387,7 +386,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
if (!vm && !vext_elem_mask(v0, i)) {
continue;
}
- probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
+ probe_pages(env, get_index_addr(base, i, vs2), nf * esz, ra,
access_type);
}
/* load bytes from guest memory */
@@ -397,67 +396,63 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
continue;
}
while (k < nf) {
- abi_ptr addr = get_index_addr(base, i, vs2) + k * msz;
+ abi_ptr addr = get_index_addr(base, i, vs2) + k * esz;
ldst_elem(env, addr, i + k * vlmax, vd, ra);
k++;
}
}
}
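
Stripped of masking, segments (nf) and QEMU's page probing, the access pattern implemented above is a plain gather; a minimal standalone sketch for 16-bit offsets and 32-bit data follows (function and parameter names are illustrative):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* dst[i] = 32-bit load from base + idx[i], for i in [0, vl);
 * memcpy avoids unaligned-access undefined behaviour. */
static void gather_ei16_32(uint32_t *dst, const uint8_t *base,
                           const uint16_t *idx, size_t vl)
{
    for (size_t i = 0; i < vl; i++) {
        uint32_t v;
        memcpy(&v, base + idx[i], sizeof(v));
        dst[i] = v;
    }
}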
-#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN) \
+#define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
- LOAD_FN, sizeof(ETYPE), sizeof(MTYPE), \
- GETPC(), MMU_DATA_LOAD); \
-}
-
-GEN_VEXT_LD_INDEX(vlxb_v_b, int8_t, int8_t, idx_b, ldb_b)
-GEN_VEXT_LD_INDEX(vlxb_v_h, int8_t, int16_t, idx_h, ldb_h)
-GEN_VEXT_LD_INDEX(vlxb_v_w, int8_t, int32_t, idx_w, ldb_w)
-GEN_VEXT_LD_INDEX(vlxb_v_d, int8_t, int64_t, idx_d, ldb_d)
-GEN_VEXT_LD_INDEX(vlxh_v_h, int16_t, int16_t, idx_h, ldh_h)
-GEN_VEXT_LD_INDEX(vlxh_v_w, int16_t, int32_t, idx_w, ldh_w)
-GEN_VEXT_LD_INDEX(vlxh_v_d, int16_t, int64_t, idx_d, ldh_d)
-GEN_VEXT_LD_INDEX(vlxw_v_w, int32_t, int32_t, idx_w, ldw_w)
-GEN_VEXT_LD_INDEX(vlxw_v_d, int32_t, int64_t, idx_d, ldw_d)
-GEN_VEXT_LD_INDEX(vlxe_v_b, int8_t, int8_t, idx_b, lde_b)
-GEN_VEXT_LD_INDEX(vlxe_v_h, int16_t, int16_t, idx_h, lde_h)
-GEN_VEXT_LD_INDEX(vlxe_v_w, int32_t, int32_t, idx_w, lde_w)
-GEN_VEXT_LD_INDEX(vlxe_v_d, int64_t, int64_t, idx_d, lde_d)
-GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t, uint8_t, idx_b, ldbu_b)
-GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t, uint16_t, idx_h, ldbu_h)
-GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t, uint32_t, idx_w, ldbu_w)
-GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t, uint64_t, idx_d, ldbu_d)
-GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h)
-GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w)
-GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d)
-GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w)
-GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d)
-
-#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
+ LOAD_FN, sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b)
+GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h)
+GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w)
+GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d)
+GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b)
+GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h)
+GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w)
+GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d)
+GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b)
+GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h)
+GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w)
+GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d)
+GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b)
+GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h)
+GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w)
+GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d)
+
+#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
- STORE_FN, sizeof(ETYPE), sizeof(MTYPE), \
+ STORE_FN, sizeof(ETYPE), \
GETPC(), MMU_DATA_STORE); \
}
-GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t, int8_t, idx_b, stb_b)
-GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t, int16_t, idx_h, stb_h)
-GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t, int32_t, idx_w, stb_w)
-GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t, int64_t, idx_d, stb_d)
-GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h)
-GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w)
-GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d)
-GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w)
-GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d)
-GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t, int8_t, idx_b, ste_b)
-GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h)
-GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w)
-GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d)
+GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b)
+GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h)
+GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w)
+GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d)
+GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b)
+GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h)
+GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w)
+GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d)
+GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b)
+GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h)
+GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w)
+GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d)
+GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b)
+GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h)
+GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w)
+GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d)
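
Expanding the first instantiation by hand, directly from the GEN_VEXT_ST_INDEX definition above (HELPER() itself expands further into the helper's linkage name), gives:

void HELPER(vsxei8_8_v)(void *vd, void *v0, target_ulong base,
                        void *vs2, CPURISCVState *env, uint32_t desc)
{
    vext_ldst_index(vd, v0, base, vs2, env, desc, idx_b,
                    ste_b, sizeof(int8_t),
                    GETPC(), MMU_DATA_STORE);
}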
/*
 *** unit-stride fault-only-first load instructions