@@ -120,59 +120,51 @@ static bool require_noover(const int8_t dst, const int8_t dst_lmul,
     return !is_overlapped(dst, dst_size, src, src_size);
 }
 
-static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
 {
-    TCGv s1, s2, dst;
+    TCGv s1, dst;
 
-    if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
+    if (!require_rvv(s) || !has_ext(s, RVV)) {
         return false;
     }
 
-    s2 = get_gpr(ctx, a->rs2, EXT_ZERO);
-    dst = dest_gpr(ctx, a->rd);
+    dst = dest_gpr(s, rd);
 
-    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
-    if (a->rs1 == 0) {
+    if (rd == 0 && rs1 == 0) {
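+        /* rd = x0, rs1 = x0: use the current vl as the AVL to keep vl. */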
+        s1 = tcg_temp_new();
+        tcg_gen_mov_tl(s1, cpu_vl);
+    } else if (rs1 == 0) {
         /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
         s1 = tcg_constant_tl(RV_VLEN_MAX);
     } else {
-        s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
+        s1 = get_gpr(s, rs1, EXT_ZERO);
     }
+
     gen_helper_vsetvl(dst, cpu_env, s1, s2);
-    gen_set_gpr(ctx, a->rd, dst);
-    mark_vs_dirty(ctx);
+    gen_set_gpr(s, rd, dst);
+    mark_vs_dirty(s);
 
-    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
+    tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
     tcg_gen_lookup_and_goto_ptr();
-    ctx->base.is_jmp = DISAS_NORETURN;
-    return true;
-}
+    s->base.is_jmp = DISAS_NORETURN;
 
-static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
-{
-    TCGv s1, s2, dst;
-
-    if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
-        return false;
+    if (rd == 0 && rs1 == 0) {
+        tcg_temp_free(s1);
     }
 
-    s2 = tcg_constant_tl(a->zimm);
-    dst = dest_gpr(ctx, a->rd);
+    return true;
+}
 
-    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
-    if (a->rs1 == 0) {
-        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
-        s1 = tcg_constant_tl(RV_VLEN_MAX);
-    } else {
-        s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
-    }
-    gen_helper_vsetvl(dst, cpu_env, s1, s2);
-    gen_set_gpr(ctx, a->rd, dst);
-    mark_vs_dirty(ctx);
+static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
+{
+    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
+    return do_vsetvl(s, a->rd, a->rs1, s2);
+}
 
-    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
-    ctx->base.is_jmp = DISAS_NORETURN;
-    return true;
+static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
+{
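+    /* The zimm field carries the new vtype setting as an immediate. */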
+    TCGv s2 = tcg_constant_tl(a->zimm);
+    return do_vsetvl(s, a->rd, a->rs1, s2);
 }
 
 /* vector register offset from env */
@@ -31,12 +31,24 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
 {
     int vlmax, vl;
     RISCVCPU *cpu = env_archcpu(env);
+    uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL);
     uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
     uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
     bool vill = FIELD_EX64(s2, VTYPE, VILL);
     target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);
 
-    if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
+    if (lmul & 4) {
+        /* Fractional LMUL. */
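+        /* vlmul 4 is reserved; 5, 6, 7 encode LMUL = 1/8, 1/4, 1/2. */
+        /* Set vill if SEW > LMUL * ELEN for a fractional LMUL. */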
+        if (lmul == 4 ||
+            cpu->cfg.elen >> (8 - lmul) < sew) {
+            vill = true;
+        }
+    }
+
+    if ((sew > cpu->cfg.elen)
+        || vill
+        || (ediv != 0)
+        || (reserved != 0)) {
         /* only set vill bit. */
         env->vtype = FIELD_DP64(0, VTYPE, VILL, 1);
         env->vl = 0;