@@ -140,28 +140,11 @@ DEF_HELPER_6(vsxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbff_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhff_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhff_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhff_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwff_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwff_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vleff_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vleff_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vleff_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vleff_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbuff_v_b, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbuff_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbuff_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlbuff_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhuff_v_h, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32)
-DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle8ff_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle16ff_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle32ff_v, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle64ff_v, void, ptr, ptr, tl, env, i32)
+
#ifdef TARGET_RISCV64
DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
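For readers following the helper declarations: QEMU's DEF_HELPER machinery turns each entry into a C prototype, with ptr becoming void *, tl becoming target_ulong, env becoming CPURISCVState *, and i32 becoming uint32_t. The new vle8ff_v declaration therefore corresponds to roughly the prototype below (a sketch of the generated shape; the parameter names are taken from the helper definition further down, not from the macro output):

    /* Approximate expansion of
     * DEF_HELPER_5(vle8ff_v, void, ptr, ptr, tl, env, i32). */
    void helper_vle8ff_v(void *vd, void *v0, target_ulong base,
                         CPURISCVState *env, uint32_t desc);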
@@ -249,14 +249,6 @@ vsse16_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
vsse32_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
vsse64_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm

-vlbff_v ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm
-vlhff_v ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm
-vlwff_v ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm
-vleff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
-vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
-vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
-vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
-
# Vector indexed load insns.
vlxei8_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
vlxei16_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
@@ -269,6 +261,12 @@ vsxei16_v ... 0-1 . ..... ..... 101 ..... 0100111 @r_nfvm
vsxei32_v ... 0-1 . ..... ..... 110 ..... 0100111 @r_nfvm
vsxei64_v ... 0-1 . ..... ..... 111 ..... 0100111 @r_nfvm

+# Vector unit-stride fault-only-first load insns.
+vle8ff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+vle16ff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+vle32ff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+vle64ff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
+
#*** Vector AMO operations are encoded under the standard AMO major opcode ***
vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoaddw_v 00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
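The four new patterns differ only in the width field, insn[14:12]; the remaining fixed bits already pin down the operation: the leading dots hold nf, the 000 group carries mew and mop (unit-stride), the single dot is vm, and lumop = 10000 marks fault-only-first. A hypothetical extraction helper, not QEMU code, showing how width maps to EEW under these patterns:

    #include <stdint.h>

    /* Hypothetical: map the width field of a vector load under major
     * opcode 0000111 to its effective element width (EEW) in bits. */
    unsigned vload_eew(uint32_t insn)
    {
        switch ((insn >> 12) & 0x7) {  /* width, insn[14:12] */
        case 0: return 8;              /* 000 -> vle8ff.v  */
        case 5: return 16;             /* 101 -> vle16ff.v */
        case 6: return 32;             /* 110 -> vle32ff.v */
        case 7: return 64;             /* 111 -> vle64ff.v */
        default: return 0;             /* 001..100 are scalar FP widths */
        }
    }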
@@ -959,24 +959,12 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
- static gen_helper_ldst_us * const fns[7][4] = {
- { gen_helper_vlbff_v_b, gen_helper_vlbff_v_h,
- gen_helper_vlbff_v_w, gen_helper_vlbff_v_d },
- { NULL, gen_helper_vlhff_v_h,
- gen_helper_vlhff_v_w, gen_helper_vlhff_v_d },
- { NULL, NULL,
- gen_helper_vlwff_v_w, gen_helper_vlwff_v_d },
- { gen_helper_vleff_v_b, gen_helper_vleff_v_h,
- gen_helper_vleff_v_w, gen_helper_vleff_v_d },
- { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h,
- gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d },
- { NULL, gen_helper_vlhuff_v_h,
- gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d },
- { NULL, NULL,
- gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d }
+ static gen_helper_ldst_us * const fns[4] = {
+ gen_helper_vle8ff_v, gen_helper_vle16ff_v,
+ gen_helper_vle32ff_v, gen_helper_vle64ff_v
};

- fn = fns[seq][s->sew];
+ fn = fns[seq];
if (fn == NULL) {
return false;
}
@@ -990,13 +978,10 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
return ldff_trans(a->rd, a->rs1, data, fn, s);
}

-GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check)
-GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check)
-GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check)
-GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
-GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
-GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
-GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vle8ff_v, 8, 0, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vle16ff_v, 16, 1, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vle32ff_v, 32, 2, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vle64ff_v, 64, 3, r2nfvm, ldff_op, ld_us_check)

/*
*** vector atomic operation
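GEN_VEXT_TRANS itself is outside this hunk; judging from the arguments, each invocation presumably stamps out a trans_* entry point that runs the EEW-based legality check and then calls ldff_op with the sequence number that indexes fns[]. A sketch under that assumption, written out for vle16ff_v:

    /* Assumed expansion of
     * GEN_VEXT_TRANS(vle16ff_v, 16, 1, r2nfvm, ldff_op, ld_us_check);
     * the real macro lives elsewhere in the translation code. */
    static bool trans_vle16ff_v(DisasContext *s, arg_r2nfvm *a)
    {
        if (ld_us_check(s, a, 16)) {   /* EEW = 16 legality check */
            return ldff_op(s, a, 1);   /* seq = 1 -> gen_helper_vle16ff_v */
        }
        return false;
    }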
@@ -557,7 +557,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
CPURISCVState *env, uint32_t desc,
vext_ldst_elem_fn *ldst_elem,
clear_fn *clear_elem,
- uint32_t esz, uint32_t msz, uintptr_t ra)
+ uint32_t esz, uintptr_t ra)
{
void *host;
uint32_t i, k, vl = 0;
@@ -572,24 +572,24 @@ vext_ldff(void *vd, void *v0, target_ulong base,
if (!vm && !vext_elem_mask(v0, i)) {
continue;
}
- addr = base + nf * i * msz;
+ addr = base + nf * i * esz;
if (i == 0) {
- probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
+ probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
} else {
/* if it triggers an exception, no need to check watchpoint */
- remain = nf * msz;
+ remain = nf * esz;
while (remain > 0) {
offset = -(addr | TARGET_PAGE_MASK);
host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
cpu_mmu_index(env, false));
if (host) {
#ifdef CONFIG_USER_ONLY
- if (page_check_range(addr, nf * msz, PAGE_READ) < 0) {
+ if (page_check_range(addr, nf * esz, PAGE_READ) < 0) {
vl = i;
goto ProbeSuccess;
}
#else
- probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
+ probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
#endif
} else {
vl = i;
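In the probe loop above, offset = -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to the end of its page, which bounds how far a single tlb_vaddr_to_host() probe may extend. A standalone illustration of that arithmetic, assuming a 4 KiB page (plain C, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t page_mask = ~(uint64_t)0xfff; /* TARGET_PAGE_MASK */
        uint64_t addr = 0x1ff8;
        /* OR-ing in the mask sets every page-number bit, so negating
         * leaves exactly the distance to the next page boundary. */
        uint64_t offset = -(addr | page_mask);
        printf("%llu\n", (unsigned long long)offset); /* prints 8 */
        return 0;
    }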
@@ -614,7 +614,7 @@ ProbeSuccess:
continue;
}
while (k < nf) {
- target_ulong addr = base + (i * nf + k) * msz;
+ target_ulong addr = base + (i * nf + k) * esz;
ldst_elem(env, addr, i + k * vlmax, vd, ra);
k++;
}
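With the separate memory-type size gone, the element address is base + (i * nf + k) * esz: the nf fields of segment i are laid out back to back, one EEW-sized element each. A worked example with hypothetical values, nf = 2 and esz = 4 (a two-field segment load of 32-bit elements):

    #include <stdio.h>

    int main(void)
    {
        const unsigned nf = 2, esz = 4;
        const unsigned long base = 0x1000;
        for (unsigned i = 0; i < 2; i++) {      /* segments */
            for (unsigned k = 0; k < nf; k++) { /* fields */
                printf("i=%u k=%u -> 0x%lx\n",
                       i, k, base + (i * nf + k) * esz);
            }
        }
        /* 0x1000 0x1004 0x1008 0x100c: consecutive, no gaps. */
        return 0;
    }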
@@ -629,36 +629,18 @@ ProbeSuccess:
}
}

-#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN, \
- sizeof(ETYPE), sizeof(MTYPE), GETPC()); \
-}
-
-GEN_VEXT_LDFF(vlbff_v_b, int8_t, int8_t, ldb_b, clearb)
-GEN_VEXT_LDFF(vlbff_v_h, int8_t, int16_t, ldb_h, clearh)
-GEN_VEXT_LDFF(vlbff_v_w, int8_t, int32_t, ldb_w, clearl)
-GEN_VEXT_LDFF(vlbff_v_d, int8_t, int64_t, ldb_d, clearq)
-GEN_VEXT_LDFF(vlhff_v_h, int16_t, int16_t, ldh_h, clearh)
-GEN_VEXT_LDFF(vlhff_v_w, int16_t, int32_t, ldh_w, clearl)
-GEN_VEXT_LDFF(vlhff_v_d, int16_t, int64_t, ldh_d, clearq)
-GEN_VEXT_LDFF(vlwff_v_w, int32_t, int32_t, ldw_w, clearl)
-GEN_VEXT_LDFF(vlwff_v_d, int32_t, int64_t, ldw_d, clearq)
-GEN_VEXT_LDFF(vleff_v_b, int8_t, int8_t, lde_b, clearb)
-GEN_VEXT_LDFF(vleff_v_h, int16_t, int16_t, lde_h, clearh)
-GEN_VEXT_LDFF(vleff_v_w, int32_t, int32_t, lde_w, clearl)
-GEN_VEXT_LDFF(vleff_v_d, int64_t, int64_t, lde_d, clearq)
-GEN_VEXT_LDFF(vlbuff_v_b, uint8_t, uint8_t, ldbu_b, clearb)
-GEN_VEXT_LDFF(vlbuff_v_h, uint8_t, uint16_t, ldbu_h, clearh)
-GEN_VEXT_LDFF(vlbuff_v_w, uint8_t, uint32_t, ldbu_w, clearl)
-GEN_VEXT_LDFF(vlbuff_v_d, uint8_t, uint64_t, ldbu_d, clearq)
-GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, clearh)
-GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl)
-GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq)
-GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl)
-GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq)
+#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN, \
+ sizeof(ETYPE), GETPC()); \
+}
+
+GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b, clearb)
+GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h, clearh)
+GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w, clearl)
+GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d, clearq)

/*
*** Vector AMO Operations (Zvamo)
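Since the new GEN_VEXT_LDFF body is fully visible above, its expansion is mechanical; GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b, clearb) produces, written out:

    void helper_vle8ff_v(void *vd, void *v0, target_ulong base,
                         CPURISCVState *env, uint32_t desc)
    {
        vext_ldff(vd, v0, base, env, desc, lde_b, clearb,
                  sizeof(int8_t), GETPC());
    }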