Message ID | 20240522091305.1858-1-zhiwei_liu@linux.alibaba.com
---|---
State | New, archived
Series | [1/1] target/riscv: Support Zama16b extension
On 5/22/24 06:13, LIU Zhiwei wrote: > Zama16b is the property that misaligned load/stores/atomics within > a naturally aligned 16-byte region are atomic. > > According to the specification, Zama16b applies only to AMOs, loads > and stores defined in the base ISAs, and loads and stores of no more > than XLEN bits defined in the F, D, and Q extensions. Thus it should > not apply to zacas or RVC instructions. > > For an instruction in that set, if all accessed bytes lie within 16B granule, > the instruction will not raise an exception for reasons of address alignment, > and the instruction will give rise to only one memory operation for the > purposes of RVWMO—i.e., it will execute atomically. > > Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com> > --- > target/riscv/cpu.c | 2 ++ > target/riscv/cpu_cfg.h | 1 + > target/riscv/insn_trans/trans_rva.c.inc | 42 ++++++++++++++----------- > target/riscv/insn_trans/trans_rvd.c.inc | 14 +++++++-- > target/riscv/insn_trans/trans_rvf.c.inc | 14 +++++++-- > target/riscv/insn_trans/trans_rvi.c.inc | 6 ++++ > 6 files changed, 57 insertions(+), 22 deletions(-) > > diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c > index eb1a2e7d6d..911e9892ed 100644 > --- a/target/riscv/cpu.c > +++ b/target/riscv/cpu.c > @@ -117,6 +117,7 @@ const RISCVIsaExtData isa_edata_arr[] = { > ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), > ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), > ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), > + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_12_0, ext_zama16b), Is this the right order? Shouldn't it be after zalrsc? LGTM otherwise. Thanks, Daniel > ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), > ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), > ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), > @@ -1464,6 +1465,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { > MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), > MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), > MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), > + MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), > MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), > MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), > MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), > diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h > index cb750154bd..eaa66eb4f8 100644 > --- a/target/riscv/cpu_cfg.h > +++ b/target/riscv/cpu_cfg.h > @@ -81,6 +81,7 @@ struct RISCVCPUConfig { > bool ext_zdinx; > bool ext_zaamo; > bool ext_zacas; > + bool ext_zama16b; > bool ext_zalrsc; > bool ext_zawrs; > bool ext_zfa; > diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc > index 4a9e4591d1..eb080baddd 100644 > --- a/target/riscv/insn_trans/trans_rva.c.inc > +++ b/target/riscv/insn_trans/trans_rva.c.inc > @@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, > TCGv dest = dest_gpr(ctx, a->rd); > TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); > > + if (ctx->cfg_ptr->ext_zama16b) { > + mop |= MO_ATOM_WITHIN16; > + } else { > + mop |= MO_ALIGN; > + } > + > decode_save_opc(ctx); > src1 = get_address(ctx, a->rs1, 0); > func(dest, src1, src2, ctx->mem_idx, mop); > @@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) > static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, 
MO_TESL); > } > > static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL); > } > > static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL); > } > > static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL); > } > > static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL); > } > > static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL); > } > > static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL); > } > > static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL); > } > > static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL); > } > > static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) > @@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ); > } > > static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ); > } > > static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ); > } > > static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ); > } > > static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ); > } > > static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) > { > REQUIRE_64BIT(ctx); > 
REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ); > } > > static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ); > } > > static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ); > } > > static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ); > } > diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc > index d9ce9e407f..1f5fac65a2 100644 > --- a/target/riscv/insn_trans/trans_rvd.c.inc > +++ b/target/riscv/insn_trans/trans_rvd.c.inc > @@ -42,13 +42,18 @@ > static bool trans_fld(DisasContext *ctx, arg_fld *a) > { > TCGv addr; > + MemOp memop = MO_TEUQ; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVD); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > - tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ); > + tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop); > > mark_fs_dirty(ctx); > return true; > @@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) > static bool trans_fsd(DisasContext *ctx, arg_fsd *a) > { > TCGv addr; > + MemOp memop = MO_TEUQ; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVD); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ); > + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); > return true; > } > > diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc > index 97a368970b..f771aa1939 100644 > --- a/target/riscv/insn_trans/trans_rvf.c.inc > +++ b/target/riscv/insn_trans/trans_rvf.c.inc > @@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) > { > TCGv_i64 dest; > TCGv addr; > + MemOp memop = MO_TEUL; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVF); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > dest = cpu_fpr[a->rd]; > - tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL); > + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop); > gen_nanbox_s(dest, dest); > > mark_fs_dirty(ctx); > @@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) > static bool trans_fsw(DisasContext *ctx, arg_fsw *a) > { > TCGv addr; > + MemOp memop = MO_TEUL; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVF); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL); > + 
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); > return true; > } > > diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc > index ad40d3e87f..98e3806d5e 100644 > --- a/target/riscv/insn_trans/trans_rvi.c.inc > +++ b/target/riscv/insn_trans/trans_rvi.c.inc > @@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) > { > bool out; > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > decode_save_opc(ctx); > if (get_xl(ctx) == MXL_RV128) { > out = gen_load_i128(ctx, a, memop); > @@ -366,6 +369,9 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) > > static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) > { > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > decode_save_opc(ctx); > if (get_xl(ctx) == MXL_RV128) { > return gen_store_i128(ctx, a, memop);
Zhiwei, On 5/22/24 06:13, LIU Zhiwei wrote: > Zama16b is the property that misaligned load/stores/atomics within > a naturally aligned 16-byte region are atomic. > > According to the specification, Zama16b applies only to AMOs, loads > and stores defined in the base ISAs, and loads and stores of no more > than XLEN bits defined in the F, D, and Q extensions. Thus it should > not apply to zacas or RVC instructions. > > For an instruction in that set, if all accessed bytes lie within 16B granule, > the instruction will not raise an exception for reasons of address alignment, > and the instruction will give rise to only one memory operation for the > purposes of RVWMO—i.e., it will execute atomically. > > Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com> > --- > target/riscv/cpu.c | 2 ++ > target/riscv/cpu_cfg.h | 1 + > target/riscv/insn_trans/trans_rva.c.inc | 42 ++++++++++++++----------- > target/riscv/insn_trans/trans_rvd.c.inc | 14 +++++++-- > target/riscv/insn_trans/trans_rvf.c.inc | 14 +++++++-- > target/riscv/insn_trans/trans_rvi.c.inc | 6 ++++ > 6 files changed, 57 insertions(+), 22 deletions(-) > > diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c > index eb1a2e7d6d..911e9892ed 100644 > --- a/target/riscv/cpu.c > +++ b/target/riscv/cpu.c > @@ -117,6 +117,7 @@ const RISCVIsaExtData isa_edata_arr[] = { > ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), > ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), > ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), > + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_12_0, ext_zama16b), Isn't zama16b a 1.13 spec extension? Looking it up I think it is. In this case I think it's wise to make this patch (and any other 1.13 extension that we might be adding now) dependent on "[RESEND PATCH v2 0/5] target/riscv: Support RISC-V privilege 1.13 spec" And then we can set the proper spec versions for each one. 
Thanks, Daniel > ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), > ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), > ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), > @@ -1464,6 +1465,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { > MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), > MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), > MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), > + MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), > MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), > MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), > MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), > diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h > index cb750154bd..eaa66eb4f8 100644 > --- a/target/riscv/cpu_cfg.h > +++ b/target/riscv/cpu_cfg.h > @@ -81,6 +81,7 @@ struct RISCVCPUConfig { > bool ext_zdinx; > bool ext_zaamo; > bool ext_zacas; > + bool ext_zama16b; > bool ext_zalrsc; > bool ext_zawrs; > bool ext_zfa; > diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc > index 4a9e4591d1..eb080baddd 100644 > --- a/target/riscv/insn_trans/trans_rva.c.inc > +++ b/target/riscv/insn_trans/trans_rva.c.inc > @@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, > TCGv dest = dest_gpr(ctx, a->rd); > TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); > > + if (ctx->cfg_ptr->ext_zama16b) { > + mop |= MO_ATOM_WITHIN16; > + } else { > + mop |= MO_ALIGN; > + } > + > decode_save_opc(ctx); > src1 = get_address(ctx, a->rs1, 0); > func(dest, src1, src2, ctx->mem_idx, mop); > @@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) > static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL); > } > > static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL); > } > > static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL); > } > > static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL); > } > > static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL); > } > > static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL); > } > > static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL); > } > > static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, 
&tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL); > } > > static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) > { > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL); > } > > static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) > @@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ); > } > > static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ); > } > > static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ); > } > > static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ); > } > > static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ); > } > > static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ); > } > > static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ); > } > > static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ); > } > > static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) > { > REQUIRE_64BIT(ctx); > REQUIRE_A_OR_ZAAMO(ctx); > - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); > + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ); > } > diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc > index d9ce9e407f..1f5fac65a2 100644 > --- a/target/riscv/insn_trans/trans_rvd.c.inc > +++ b/target/riscv/insn_trans/trans_rvd.c.inc > @@ -42,13 +42,18 @@ > static bool trans_fld(DisasContext *ctx, arg_fld *a) > { > TCGv addr; > + MemOp memop = MO_TEUQ; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVD); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > - tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ); > + 
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop); > > mark_fs_dirty(ctx); > return true; > @@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) > static bool trans_fsd(DisasContext *ctx, arg_fsd *a) > { > TCGv addr; > + MemOp memop = MO_TEUQ; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVD); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ); > + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); > return true; > } > > diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc > index 97a368970b..f771aa1939 100644 > --- a/target/riscv/insn_trans/trans_rvf.c.inc > +++ b/target/riscv/insn_trans/trans_rvf.c.inc > @@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) > { > TCGv_i64 dest; > TCGv addr; > + MemOp memop = MO_TEUL; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVF); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > dest = cpu_fpr[a->rd]; > - tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL); > + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop); > gen_nanbox_s(dest, dest); > > mark_fs_dirty(ctx); > @@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) > static bool trans_fsw(DisasContext *ctx, arg_fsw *a) > { > TCGv addr; > + MemOp memop = MO_TEUL; > > REQUIRE_FPU; > REQUIRE_EXT(ctx, RVF); > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > + > decode_save_opc(ctx); > addr = get_address(ctx, a->rs1, a->imm); > - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL); > + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); > return true; > } > > diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc > index ad40d3e87f..98e3806d5e 100644 > --- a/target/riscv/insn_trans/trans_rvi.c.inc > +++ b/target/riscv/insn_trans/trans_rvi.c.inc > @@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) > { > bool out; > > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > decode_save_opc(ctx); > if (get_xl(ctx) == MXL_RV128) { > out = gen_load_i128(ctx, a, memop); > @@ -366,6 +369,9 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) > > static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) > { > + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { > + memop |= MO_ATOM_WITHIN16; > + } > decode_save_opc(ctx); > if (get_xl(ctx) == MXL_RV128) { > return gen_store_i128(ctx, a, memop);
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index eb1a2e7d6d..911e9892ed 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -117,6 +117,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_12_0, ext_zama16b), ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), @@ -1464,6 +1465,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), + MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index cb750154bd..eaa66eb4f8 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -81,6 +81,7 @@ struct RISCVCPUConfig { bool ext_zdinx; bool ext_zaamo; bool ext_zacas; + bool ext_zama16b; bool ext_zalrsc; bool ext_zawrs; bool ext_zfa; diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index 4a9e4591d1..eb080baddd 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, TCGv dest = dest_gpr(ctx, a->rd); TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + if (ctx->cfg_ptr->ext_zama16b) { + mop |= MO_ATOM_WITHIN16; + } else { + mop |= MO_ALIGN; + } + decode_save_opc(ctx); src1 = get_address(ctx, a->rs1, 0); func(dest, src1, src2, ctx->mem_idx, mop); @@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL); } static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL); } static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL); } static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL); } static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL); } static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL); } static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, 
&tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL); } static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL); } static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL); } static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) @@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ); } static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ); } static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ); } static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ); } static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ); } static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ); } static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ); } static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ); } static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ); } diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index d9ce9e407f..1f5fac65a2 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -42,13 +42,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) { TCGv addr; + MemOp memop = MO_TEUQ; REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); - tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, 
ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop); mark_fs_dirty(ctx); return true; @@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) static bool trans_fsd(DisasContext *ctx, arg_fsd *a) { TCGv addr; + MemOp memop = MO_TEUQ; REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; } diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index 97a368970b..f771aa1939 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) { TCGv_i64 dest; TCGv addr; + MemOp memop = MO_TEUL; REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); dest = cpu_fpr[a->rd]; - tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop); gen_nanbox_s(dest, dest); mark_fs_dirty(ctx); @@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) static bool trans_fsw(DisasContext *ctx, arg_fsw *a) { TCGv addr; + MemOp memop = MO_TEUL; REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; } diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index ad40d3e87f..98e3806d5e 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { bool out; + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } decode_save_opc(ctx); if (get_xl(ctx) == MXL_RV128) { out = gen_load_i128(ctx, a, memop); @@ -366,6 +369,9 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) { + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } decode_save_opc(ctx); if (get_xl(ctx) == MXL_RV128) { return gen_store_i128(ctx, a, memop);
Zama16b is the property that misaligned loads/stores/atomics within
a naturally aligned 16-byte region are atomic.

According to the specification, Zama16b applies only to AMOs, loads
and stores defined in the base ISAs, and loads and stores of no more
than XLEN bits defined in the F, D, and Q extensions. Thus it should
not apply to Zacas or RVC instructions.

For an instruction in that set, if all accessed bytes lie within a
naturally aligned 16-byte granule, the instruction will not raise an
exception for reasons of address alignment, and it will give rise to
only one memory operation for the purposes of RVWMO, i.e., it will
execute atomically.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
---
 target/riscv/cpu.c                      |  2 ++
 target/riscv/cpu_cfg.h                  |  1 +
 target/riscv/insn_trans/trans_rva.c.inc | 42 ++++++++++++++-----------
 target/riscv/insn_trans/trans_rvd.c.inc | 14 +++++++--
 target/riscv/insn_trans/trans_rvf.c.inc | 14 +++++++--
 target/riscv/insn_trans/trans_rvi.c.inc |  6 ++++
 6 files changed, 57 insertions(+), 22 deletions(-)
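For readers skimming the thread, the heart of the patch is the MemOp selection in the translation helpers: AMOs trade the MO_ALIGN requirement for MO_ATOM_WITHIN16 when Zama16b is enabled, while integer and FP loads/stores add MO_ATOM_WITHIN16 only for non-compressed encodings, since Zama16b does not cover RVC. Below is a minimal sketch of that pattern, condensed from the hunks above; the helper names are illustrative and not part of the patch.

```c
/* Illustrative only: condensed from the trans_rv*.c.inc hunks above. */

static MemOp pick_amo_memop(const DisasContext *ctx, MemOp mop)
{
    if (ctx->cfg_ptr->ext_zama16b) {
        /* Zama16b: a misaligned AMO is atomic if it stays in one 16B granule. */
        mop |= MO_ATOM_WITHIN16;
    } else {
        /* Without Zama16b, AMOs must remain naturally aligned. */
        mop |= MO_ALIGN;
    }
    return mop;
}

static MemOp pick_ldst_memop(const DisasContext *ctx, MemOp memop)
{
    /* Zama16b does not apply to RVC encodings, hence the insn-length check. */
    if (ctx->cfg_ptr->ext_zama16b && ctx->cur_insn_len != 2) {
        memop |= MO_ATOM_WITHIN16;
    }
    return memop;
}
```

Assuming the patch is applied, the new property should be selectable like the other multi-letter extensions declared with MULTI_EXT_CFG_BOOL, e.g. via `-cpu rv64,zama16b=true` on the QEMU command line.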