Message ID | 20211029085922.255197-74-frank.chang@sifive.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | support vector extension v1.0 | expand |
On Fri, Oct 29, 2021 at 8:11 PM <frank.chang@sifive.com> wrote: > > From: Frank Chang <frank.chang@sifive.com> > > Add support for Vector unit-stride mask load/store instructions > (vlm.v, vsm.v), which have: > evl (effective vector length) = ceil(env->vl / 8). > > The new instructions operate the same as unmasked byte loads and stores. > Add evl parameter to reuse vext_ldst_us(). > > Signed-off-by: Frank Chang <frank.chang@sifive.com> Reviewed-by: Alistair Francis <alistair.francis@wdc.com> Alistair > --- > target/riscv/vector_helper.c | 36 ++++++++++++++++++------------------ > 1 file changed, 18 insertions(+), 18 deletions(-) > > diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c > index 946dca53ffd..83373ca6fc6 100644 > --- a/target/riscv/vector_helper.c > +++ b/target/riscv/vector_helper.c > @@ -279,15 +279,15 @@ GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d) > /* unmasked unit-stride load and store operation*/ > static void > vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, > - vext_ldst_elem_fn *ldst_elem, > - uint32_t esz, uintptr_t ra, MMUAccessType access_type) > + vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t evl, > + uintptr_t ra, MMUAccessType access_type) > { > uint32_t i, k; > uint32_t nf = vext_nf(desc); > uint32_t max_elems = vext_max_elems(desc, esz); > > /* load bytes from guest memory */ > - for (i = env->vstart; i < env->vl; i++, env->vstart++) { > + for (i = env->vstart; i < evl; i++, env->vstart++) { > k = 0; > while (k < nf) { > target_ulong addr = base + ((i * nf + k) << esz); > @@ -316,7 +316,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ > CPURISCVState *env, uint32_t desc) \ > { \ > vext_ldst_us(vd, base, env, desc, LOAD_FN, \ > - ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \ > + ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_LOAD); \ > } > > GEN_VEXT_LD_US(vle8_v, int8_t, lde_b) > @@ -324,20 +324,20 @@ GEN_VEXT_LD_US(vle16_v, int16_t, lde_h) > 
GEN_VEXT_LD_US(vle32_v, int32_t, lde_w) > GEN_VEXT_LD_US(vle64_v, int64_t, lde_d) > > -#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \ > -void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ > - CPURISCVState *env, uint32_t desc) \ > -{ \ > - uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ > - vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ > - ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \ > -} \ > - \ > -void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ > - CPURISCVState *env, uint32_t desc) \ > -{ \ > - vext_ldst_us(vd, base, env, desc, STORE_FN, \ > - ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \ > +#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \ > +void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ > + CPURISCVState *env, uint32_t desc) \ > +{ \ > + uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ > + vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ > + ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \ > +} \ > + \ > +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ > + CPURISCVState *env, uint32_t desc) \ > +{ \ > + vext_ldst_us(vd, base, env, desc, STORE_FN, \ > + ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_STORE); \ > } > > GEN_VEXT_ST_US(vse8_v, int8_t, ste_b) > -- > 2.25.1 > >
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 946dca53ffd..83373ca6fc6 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -279,15 +279,15 @@ GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d) /* unmasked unit-stride load and store operation*/ static void vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, - vext_ldst_elem_fn *ldst_elem, - uint32_t esz, uintptr_t ra, MMUAccessType access_type) + vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t evl, + uintptr_t ra, MMUAccessType access_type) { uint32_t i, k; uint32_t nf = vext_nf(desc); uint32_t max_elems = vext_max_elems(desc, esz); /* load bytes from guest memory */ - for (i = env->vstart; i < env->vl; i++, env->vstart++) { + for (i = env->vstart; i < evl; i++, env->vstart++) { k = 0; while (k < nf) { target_ulong addr = base + ((i * nf + k) << esz); @@ -316,7 +316,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ CPURISCVState *env, uint32_t desc) \ { \ vext_ldst_us(vd, base, env, desc, LOAD_FN, \ - ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \ + ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_LOAD); \ } GEN_VEXT_LD_US(vle8_v, int8_t, lde_b) @@ -324,20 +324,20 @@ GEN_VEXT_LD_US(vle16_v, int16_t, lde_h) GEN_VEXT_LD_US(vle32_v, int32_t, lde_w) GEN_VEXT_LD_US(vle64_v, int64_t, lde_d) -#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \ -void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ - vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ - ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \ -} \ - \ -void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vext_ldst_us(vd, base, env, desc, STORE_FN, \ - ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \ +#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \ +void HELPER(NAME##_mask)(void *vd, void 
*v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ + ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \ +} \ + \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_us(vd, base, env, desc, STORE_FN, \ + ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_STORE); \ } GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)