Message ID | 20191128161718.24361-3-maz@kernel.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | target/arm: More HCR_EL2.TIDx fixes | expand |
On Thu, Nov 28, 2019 at 04:17:17PM +0000, Marc Zyngier wrote: > HCR_EL2.TID1 mandates that access from EL1 to REVIDR_EL1, AIDR_EL1 > (and their 32bit equivalents) as well as TCMTR, TLBTR are trapped > to EL2. QEMU ignores it, naking it harder for a hypervisor to Typo: "making it harder" > virtualize the HW (though to be fair, no known hypervisor actually > cares). > > Do the right thing by trapping to EL2 if HCR_EL2.TID1 is set. > > Signed-off-by: Marc Zyngier <maz@kernel.org> > --- > target/arm/helper.c | 36 ++++++++++++++++++++++++++++++++---- > 1 file changed, 32 insertions(+), 4 deletions(-) > > diff --git a/target/arm/helper.c b/target/arm/helper.c > index 0b6887b100..9bff769692 100644 > --- a/target/arm/helper.c > +++ b/target/arm/helper.c > @@ -1973,6 +1973,26 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) > return ret; > } > > +static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, > + bool isread) > +{ > + if (arm_hcr_el2_eff(env) & HCR_TID1) { > + return CP_ACCESS_TRAP_EL2; > + } I think we need to check for EL1 here? Otherwise: Reviewed-by: Edgar E. 
Iglesias <edgar.iglesias@xilinx.com> Cheers, Edgar > + > + return CP_ACCESS_OK; > +} > + > +static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, > + bool isread) > +{ > + if (arm_feature(env, ARM_FEATURE_V8)) { > + return access_aa64_tid1(env, ri, isread); > + } > + > + return CP_ACCESS_OK; > +} > + > static const ARMCPRegInfo v7_cp_reginfo[] = { > /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ > { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, > @@ -2136,7 +2156,9 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { > */ > { .name = "AIDR", .state = ARM_CP_STATE_BOTH, > .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, > - .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, > + .access = PL1_R, .type = ARM_CP_CONST, > + .accessfn = access_aa64_tid1, > + .resetvalue = 0 }, > /* Auxiliary fault status registers: these also are IMPDEF, and we > * choose to RAZ/WI for all cores. > */ > @@ -6732,7 +6754,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) > .access = PL1_R, .resetvalue = cpu->midr }, > { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, > .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, > - .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, > + .access = PL1_R, > + .accessfn = access_aa64_tid1, > + .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, > REGINFO_SENTINEL > }; > ARMCPRegInfo id_cp_reginfo[] = { > @@ -6747,14 +6771,18 @@ void register_cp_regs_for_features(ARMCPU *cpu) > /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ > { .name = "TCMTR", > .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, > - .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, > + .access = PL1_R, > + .accessfn = access_aa32_tid1, > + .type = ARM_CP_CONST, .resetvalue = 0 }, > REGINFO_SENTINEL > }; > /* TLBTR is specific to VMSA */ > ARMCPRegInfo id_tlbtr_reginfo = { > .name = "TLBTR", > .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, > - .access = PL1_R, .type 
= ARM_CP_CONST, .resetvalue = 0, > + .access = PL1_R, > + .accessfn = access_aa32_tid1, > + .type = ARM_CP_CONST, .resetvalue = 0, > }; > /* MPUIR is specific to PMSA V6+ */ > ARMCPRegInfo id_mpuir_reginfo = { > -- > 2.20.1 > >
diff --git a/target/arm/helper.c b/target/arm/helper.c index 0b6887b100..9bff769692 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1973,6 +1973,26 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) return ret; } +static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_hcr_el2_eff(env) & HCR_TID1) { + return CP_ACCESS_TRAP_EL2; + } + + return CP_ACCESS_OK; +} + +static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_feature(env, ARM_FEATURE_V8)) { + return access_aa64_tid1(env, ri, isread); + } + + return CP_ACCESS_OK; +} + static const ARMCPRegInfo v7_cp_reginfo[] = { /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, @@ -2136,7 +2156,9 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { */ { .name = "AIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, - .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid1, + .resetvalue = 0 }, /* Auxiliary fault status registers: these also are IMPDEF, and we * choose to RAZ/WI for all cores. 
*/ @@ -6732,7 +6754,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .resetvalue = cpu->midr }, { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, - .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, + .access = PL1_R, + .accessfn = access_aa64_tid1, + .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, REGINFO_SENTINEL }; ARMCPRegInfo id_cp_reginfo[] = { @@ -6747,14 +6771,18 @@ void register_cp_regs_for_features(ARMCPU *cpu) /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ { .name = "TCMTR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, - .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + .access = PL1_R, + .accessfn = access_aa32_tid1, + .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; /* TLBTR is specific to VMSA */ ARMCPRegInfo id_tlbtr_reginfo = { .name = "TLBTR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, - .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, + .access = PL1_R, + .accessfn = access_aa32_tid1, + .type = ARM_CP_CONST, .resetvalue = 0, }; /* MPUIR is specific to PMSA V6+ */ ARMCPRegInfo id_mpuir_reginfo = {
HCR_EL2.TID1 mandates that access from EL1 to REVIDR_EL1, AIDR_EL1 (and their 32bit equivalents) as well as TCMTR, TLBTR are trapped to EL2. QEMU ignores it, making it harder for a hypervisor to virtualize the HW (though to be fair, no known hypervisor actually cares). Do the right thing by trapping to EL2 if HCR_EL2.TID1 is set. Signed-off-by: Marc Zyngier <maz@kernel.org> --- target/arm/helper.c | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-)