@@ -723,6 +723,8 @@ DEF_HELPER_FLAGS_1(load_vtb, TCG_CALL_NO_RWG, tl, env)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_2(store_purr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_amr, void, env, tl)
+DEF_HELPER_2(store_iamr, void, env, tl)
DEF_HELPER_2(store_hrmor, void, env, tl)
DEF_HELPER_2(store_ptcr, void, env, tl)
DEF_HELPER_FLAGS_1(load_dpdes, TCG_CALL_NO_RWG, tl, env)
@@ -238,7 +238,7 @@ static void register_amr_sprs(CPUPPCState *env)
spr_register_kvm_hv(env, SPR_AMR, "AMR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_amr,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_amr,
KVM_REG_PPC_AMR, 0);
spr_register_kvm_hv(env, SPR_UAMOR, "UAMOR",
SPR_NOACCESS, SPR_NOACCESS,
@@ -259,7 +259,7 @@ static void register_iamr_sprs(CPUPPCState *env)
spr_register_kvm_hv(env, SPR_IAMR, "IAMR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_iamr,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_iamr,
KVM_REG_PPC_IAMR, 0);
#endif /* !CONFIG_USER_ONLY */
}
@@ -169,6 +169,54 @@ void helper_store_sdr1(CPUPPCState *env, target_ulong val)
}
#if defined(TARGET_PPC64)
+void helper_store_amr(CPUPPCState *env, target_ulong val)
+{
+ target_ulong old, new, mask;
+
+ if (FIELD_EX64(env->msr, MSR, PR)) {
+ mask = env->spr[SPR_UAMOR];
+ } else if (FIELD_EX64(env->msr, MSR, HV)) {
+ mask = (target_ulong)-1;
+ } else {
+ mask = env->spr[SPR_AMOR];
+ }
+
+ old = env->spr[SPR_AMR];
+ /* Replace controllable bits with those in val */
+ new = (old & ~mask) | (val & mask);
+
+ if (old != new) {
+ CPUState *cs = env_cpu(env);
+ env->spr[SPR_AMR] = new;
+ /* AMR is involved in MMU translations so must flush TLB */
+ tlb_flush(cs);
+ }
+}
+
+void helper_store_iamr(CPUPPCState *env, target_ulong val)
+{
+ target_ulong old, new, mask;
+
+ if (FIELD_EX64(env->msr, MSR, PR)) {
+ g_assert_not_reached(); /* mtIAMR is privileged */
+ } else if (FIELD_EX64(env->msr, MSR, HV)) {
+ mask = (target_ulong)-1;
+ } else {
+ mask = env->spr[SPR_AMOR];
+ }
+
+ old = env->spr[SPR_IAMR];
+ /* Replace controllable bits with those in val */
+ new = (old & ~mask) | (val & mask);
+
+ if (old != new) {
+ CPUState *cs = env_cpu(env);
+ env->spr[SPR_IAMR] = new;
+ /* IAMR is involved in MMU translations so must flush TLB */
+ tlb_flush(cs);
+ }
+}
+
void helper_store_hrmor(CPUPPCState *env, target_ulong val)
{
if (env->spr[SPR_HRMOR] != val) {
@@ -1080,33 +1080,7 @@ void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
#ifndef CONFIG_USER_ONLY
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
-
- /*
- * Note, the HV=1 PR=0 case is handled earlier by simply using
- * spr_write_generic for HV mode in the SPR table
- */
-
- /* Build insertion mask into t1 based on context */
- if (ctx->pr) {
- gen_load_spr(t1, SPR_UAMOR);
- } else {
- gen_load_spr(t1, SPR_AMOR);
- }
-
- /* Mask new bits into t2 */
- tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
-
- /* Load AMR and clear new bits in t0 */
- gen_load_spr(t0, SPR_AMR);
- tcg_gen_andc_tl(t0, t0, t1);
-
- /* Or'in new bits and write it out */
- tcg_gen_or_tl(t0, t0, t2);
- gen_store_spr(SPR_AMR, t0);
- spr_store_dump_spr(SPR_AMR);
+ gen_helper_store_amr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
@@ -1138,29 +1112,7 @@ void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
-
- /*
- * Note, the HV=1 case is handled earlier by simply using
- * spr_write_generic for HV mode in the SPR table
- */
-
- /* Build insertion mask into t1 based on context */
- gen_load_spr(t1, SPR_AMOR);
-
- /* Mask new bits into t2 */
- tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
-
- /* Load AMR and clear new bits in t0 */
- gen_load_spr(t0, SPR_IAMR);
- tcg_gen_andc_tl(t0, t0, t1);
-
- /* Or'in new bits and write it out */
- tcg_gen_or_tl(t0, t0, t2);
- gen_store_spr(SPR_IAMR, t0);
- spr_store_dump_spr(SPR_IAMR);
+ gen_helper_store_iamr(tcg_env, cpu_gpr[gprn]);
}
#endif
#endif
The IAMR and AMR registers are involved with MMU translations that are not tagged in the TLB (i.e., with mmuidx), so the TLB needs to be flushed when these are changed, e.g., as PIDR, LPIDR already do. This moves AMR and IAMR writes to helpers rather than using tlb_need_flush, because they can be written in problem state where tlb_need_flush is not checked. XXX: As far as I can tell this is needed for correct memory protection key operation; however, it seems to be causing slowdowns when booting Linux, enough to cause failures due to timeouts, so I will not merge it at the moment. I have been considering possible ways to speed this up, e.g., with mmu indexes, but that's not entirely trivial and needs a bit more work. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> --- target/ppc/helper.h | 2 ++ target/ppc/cpu_init.c | 4 ++-- target/ppc/misc_helper.c | 48 +++++++++++++++++++++++++++++++++++++ target/ppc/translate.c | 52 ++-------------------------------------- 4 files changed, 54 insertions(+), 52 deletions(-)