Message ID | 20211015074627.3957162-59-frank.chang@sifive.com |
---|---|
State | New, archived |
Series | support vector extension v1.0 |
On Fri, Oct 15, 2021 at 6:38 PM <frank.chang@sifive.com> wrote:
>
> From: Frank Chang <frank.chang@sifive.com>
>
> Add the following instructions:
>
> * vfslide1up.vf
> * vfslide1down.vf
>
> Signed-off-by: Frank Chang <frank.chang@sifive.com>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  target/riscv/helper.h                   |   7 ++
>  target/riscv/insn32.decode              |   2 +
>  target/riscv/insn_trans/trans_rvv.c.inc |  16 +++
>  target/riscv/vector_helper.c            | 141 ++++++++++++++++--------
>  4 files changed, 121 insertions(+), 45 deletions(-)
>
> diff --git a/target/riscv/helper.h b/target/riscv/helper.h
> index 304c12494d4..012d0343771 100644
> --- a/target/riscv/helper.h
> +++ b/target/riscv/helper.h
> @@ -1071,6 +1071,13 @@ DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
>  DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
>  DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
>
> +DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfslide1up_vf_d, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfslide1down_vf_h, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfslide1down_vf_w, void, ptr, ptr, i64, ptr, env, i32)
> +DEF_HELPER_6(vfslide1down_vf_d, void, ptr, ptr, i64, ptr, env, i32)
> +
>  DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
> diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
> index 7548b71efdb..c5cc14c45c4 100644
> --- a/target/riscv/insn32.decode
> +++ b/target/riscv/insn32.decode
> @@ -577,6 +577,8 @@ vfsgnjn_vv 001001 . ..... ..... 001 ..... 1010111 @r_vm
>  vfsgnjn_vf 001001 . ..... ..... 101 ..... 1010111 @r_vm
>  vfsgnjx_vv 001010 . ..... ..... 001 ..... 1010111 @r_vm
>  vfsgnjx_vf 001010 . ..... ..... 101 ..... 1010111 @r_vm
> +vfslide1up_vf 001110 . ..... ..... 101 ..... 1010111 @r_vm
> +vfslide1down_vf 001111 . ..... ..... 101 ..... 1010111 @r_vm
>  vmfeq_vv 011000 . ..... ..... 001 ..... 1010111 @r_vm
>  vmfeq_vf 011000 . ..... ..... 101 ..... 1010111 @r_vm
>  vmfne_vv 011100 . ..... ..... 001 ..... 1010111 @r_vm
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
> index e59fc5a01d8..7ee1e122e8e 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -3120,6 +3120,22 @@ GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
>  GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
>  GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
>
> +/* Vector Floating-Point Slide Instructions */
> +static bool fslideup_check(DisasContext *s, arg_rmrr *a)
> +{
> +    return slideup_check(s, a) &&
> +           require_rvf(s);
> +}
> +
> +static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
> +{
> +    return slidedown_check(s, a) &&
> +           require_rvf(s);
> +}
> +
> +GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
> +GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
> +
>  /* Vector Register Gather Instruction */
>  static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
>  {
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index d79f59e443e..7fa5189af4e 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4455,57 +4455,108 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2)
>  GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
>  GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
>
> -#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H)                     \
> -void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> -                  CPURISCVState *env, uint32_t desc)              \
> -{                                                                 \
> -    uint32_t vm = vext_vm(desc);                                  \
> -    uint32_t vl = env->vl;                                        \
> -    uint32_t i;                                                   \
> -                                                                  \
> -    for (i = 0; i < vl; i++) {                                    \
> -        if (!vm && !vext_elem_mask(v0, i)) {                      \
> -            continue;                                             \
> -        }                                                         \
> -        if (i == 0) {                                             \
> -            *((ETYPE *)vd + H(i)) = s1;                           \
> -        } else {                                                  \
> -            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));   \
> -        }                                                         \
> -    }                                                             \
> +#define GEN_VEXT_VSLIE1UP(ESZ, H)                                            \
> +static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
> +                            CPURISCVState *env, uint32_t desc)               \
> +{                                                                            \
> +    typedef uint##ESZ##_t ETYPE;                                             \
> +    uint32_t vm = vext_vm(desc);                                             \
> +    uint32_t vl = env->vl;                                                   \
> +    uint32_t i;                                                              \
> +                                                                             \
> +    for (i = 0; i < vl; i++) {                                               \
> +        if (!vm && !vext_elem_mask(v0, i)) {                                 \
> +            continue;                                                        \
> +        }                                                                    \
> +        if (i == 0) {                                                        \
> +            *((ETYPE *)vd + H(i)) = s1;                                      \
> +        } else {                                                             \
> +            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));              \
> +        }                                                                    \
> +    }                                                                        \
> +}
> +
> +GEN_VEXT_VSLIE1UP(8, H1)
> +GEN_VEXT_VSLIE1UP(16, H2)
> +GEN_VEXT_VSLIE1UP(32, H4)
> +GEN_VEXT_VSLIE1UP(64, H8)
> +
> +#define GEN_VEXT_VSLIDE1UP_VX(NAME, ESZ)                          \
> +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> +                  CPURISCVState *env, uint32_t desc)              \
> +{                                                                 \
> +    vslide1up_##ESZ(vd, v0, s1, vs2, env, desc);                  \
>  }
>
>  /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
> -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1)
> -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2)
> -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4)
> -GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8)
> -
> -#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H)                   \
> -void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> -                  CPURISCVState *env, uint32_t desc)              \
> -{                                                                 \
> -    uint32_t vm = vext_vm(desc);                                  \
> -    uint32_t vl = env->vl;                                        \
> -    uint32_t i;                                                   \
> -                                                                  \
> -    for (i = 0; i < vl; i++) {                                    \
> -        if (!vm && !vext_elem_mask(v0, i)) {                      \
> -            continue;                                             \
> -        }                                                         \
> -        if (i == vl - 1) {                                        \
> -            *((ETYPE *)vd + H(i)) = s1;                           \
> -        } else {                                                  \
> -            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));   \
> -        }                                                         \
> -    }                                                             \
> +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, 8)
> +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, 16)
> +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32)
> +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64)
> +
> +#define GEN_VEXT_VSLIDE1DOWN(ESZ, H)                                           \
> +static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
> +                              CPURISCVState *env, uint32_t desc)               \
> +{                                                                              \
> +    typedef uint##ESZ##_t ETYPE;                                               \
> +    uint32_t vm = vext_vm(desc);                                               \
> +    uint32_t vl = env->vl;                                                     \
> +    uint32_t i;                                                                \
> +                                                                               \
> +    for (i = 0; i < vl; i++) {                                                 \
> +        if (!vm && !vext_elem_mask(v0, i)) {                                   \
> +            continue;                                                          \
> +        }                                                                      \
> +        if (i == vl - 1) {                                                     \
> +            *((ETYPE *)vd + H(i)) = s1;                                        \
> +        } else {                                                               \
> +            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));                \
> +        }                                                                      \
> +    }                                                                          \
> +}
> +
> +GEN_VEXT_VSLIDE1DOWN(8, H1)
> +GEN_VEXT_VSLIDE1DOWN(16, H2)
> +GEN_VEXT_VSLIDE1DOWN(32, H4)
> +GEN_VEXT_VSLIDE1DOWN(64, H8)
> +
> +#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ESZ)                        \
> +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> +                  CPURISCVState *env, uint32_t desc)              \
> +{                                                                 \
> +    vslide1down_##ESZ(vd, v0, s1, vs2, env, desc);                \
>  }
>
>  /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
> -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1)
> -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2)
> -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4)
> -GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
> +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, 8)
> +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, 16)
> +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, 32)
> +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, 64)
> +
> +/* Vector Floating-Point Slide Instructions */
> +#define GEN_VEXT_VFSLIDE1UP_VF(NAME, ESZ)                     \
> +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
> +                  CPURISCVState *env, uint32_t desc)          \
> +{                                                             \
> +    vslide1up_##ESZ(vd, v0, s1, vs2, env, desc);              \
> +}
> +
> +/* vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] */
> +GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_h, 16)
> +GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_w, 32)
> +GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_d, 64)
> +
> +#define GEN_VEXT_VFSLIDE1DOWN_VF(NAME, ESZ)                   \
> +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
> +                  CPURISCVState *env, uint32_t desc)          \
> +{                                                             \
> +    vslide1down_##ESZ(vd, v0, s1, vs2, env, desc);            \
> +}
> +
> +/* vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] */
> +GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_h, 16)
> +GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_w, 32)
> +GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
>
>  /* Vector Register Gather Instruction */
>  #define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2)                    \
> --
> 2.25.1
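The patch reuses the integer vslide1up/vslide1down bodies for the floating-point variants because a slide only moves element bit patterns between registers; the only difference is that the scalar operand comes from f[rs1] instead of x[rs1], which is why the new helper.h entries take an i64 scalar rather than tl, and why only _h/_w/_d (16/32/64-bit) variants exist. The element semantics are easiest to see stripped of the macro machinery. The following is a minimal, self-contained C sketch of the two operations on plain arrays; it is illustration only, not QEMU code (slide1up/slide1down are made-up names), and it omits the mask (vm/v0) handling and host-endian H() indexing of the real helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* slide1up: vd[0] = s1, vd[i + 1] = vs2[i], matching the vslide1up.vx comment */
static void slide1up(uint64_t *vd, const uint64_t *vs2, uint64_t s1,
                     uint32_t vl)
{
    for (uint32_t i = vl; i > 1; i--) {
        vd[i - 1] = vs2[i - 2];
    }
    if (vl > 0) {
        vd[0] = s1;
    }
}

/* slide1down: vd[i] = vs2[i + 1], vd[vl - 1] = s1, matching vslide1down.vx */
static void slide1down(uint64_t *vd, const uint64_t *vs2, uint64_t s1,
                       uint32_t vl)
{
    for (uint32_t i = 0; i + 1 < vl; i++) {
        vd[i] = vs2[i + 1];
    }
    if (vl > 0) {
        vd[vl - 1] = s1;
    }
}

int main(void)
{
    uint64_t vs2[4] = {10, 20, 30, 40};
    uint64_t vd[4];

    slide1up(vd, vs2, 99, 4);    /* expect {99, 10, 20, 30} */
    for (int i = 0; i < 4; i++) {
        printf("%" PRIu64 " ", vd[i]);
    }
    printf("\n");

    slide1down(vd, vs2, 99, 4);  /* expect {20, 30, 40, 99} */
    for (int i = 0; i < 4; i++) {
        printf("%" PRIu64 " ", vd[i]);
    }
    printf("\n");
    return 0;
}

Because vd and vs2 are distinct buffers here (as the instructions' register-overlap rules require for the real operands), the loop direction is immaterial; 64-bit elements were chosen for brevity, whereas the helpers above are instantiated per element size.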