@@ -280,9 +280,10 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
         asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
+                                                struct kvm_arch *arch)
 {
-        __load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
+        __load_stage2(mmu, arch->vtcr);
 }
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
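The helper no longer resolves the kvm_arch pointer itself; the caller now passes it in, so the nVHE hyp applies kern_hyp_va() where it is actually needed while VHE can hand over the kernel pointer untouched. As a condensed view of what the hunks in this patch add up to (a sketch assembled from the diff, not a separate implementation):

    static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
                                                    struct kvm_arch *arch)
    {
            __load_stage2(mmu, arch->vtcr);
    }

    /* nVHE hyp: mmu->arch holds a kernel VA, translate it for EL2 */
    __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));

    /* VHE: EL2 shares the kernel's address space, pass the pointer as-is */
    __load_guest_stage2(mmu, mmu->arch);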
@@ -170,6 +170,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpu_context *host_ctxt;
         struct kvm_cpu_context *guest_ctxt;
+        struct kvm_s2_mmu *mmu;
         bool pmu_switch_needed;
         u64 exit_code;
@@ -213,7 +214,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
         __sysreg32_restore_state(vcpu);
         __sysreg_restore_state_nvhe(guest_ctxt);
-        __load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
+        mmu = kern_hyp_va(vcpu->arch.hw_mmu);
+        __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
         __activate_traps(vcpu);
         __hyp_vgic_restore_state(vcpu);
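With the new local, the nVHE run loop translates the kernel VA of the stage-2 MMU once and reuses it, and mmu->arch (still a kernel VA at this point) gets its own kern_hyp_va() before being handed to the helper. Roughly, and assuming the surrounding lines stay as they are, the path now reads:

    /* translate the kernel VA of the stage-2 MMU once, then reuse it */
    mmu = kern_hyp_va(vcpu->arch.hw_mmu);

    /* mmu->arch is also a kernel VA; translate it for EL2 as well */
    __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
    __activate_traps(vcpu);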
@@ -39,7 +39,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
          * ensuring that we always have an ISB, but not two ISBs back
          * to back.
          */
-        __load_guest_stage2(mmu);
+        __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
         asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
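Only the second argument changes here; the ISB pairing in the surrounding context is untouched. __load_stage2() (first hunk) emits its ISB only on CPUs with ARM64_WORKAROUND_SPECULATIVE_AT applied, and this call site patches in the opposite ALTERNATIVE, so exactly one ISB executes either way, as the comment above describes. Side by side, paraphrasing the two sites already shown in this patch:

    /* inside __load_stage2(): ISB only on affected CPUs */
    asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

    /* in the nVHE __tlb_switch_to_guest(): ISB only on unaffected CPUs */
    __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
    asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));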
@@ -128,7 +128,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
          * __load_guest_stage2 configures stage 2 translation, and
          * __activate_traps clear HCR_EL2.TGE (among other things).
          */
-        __load_guest_stage2(vcpu->arch.hw_mmu);
+        __load_guest_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
         __activate_traps(vcpu);
         __kvm_adjust_pc(vcpu);
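On the VHE run path EL2 uses the kernel's address space, so kern_hyp_va() is unnecessary and the kvm_arch pointer stored in the MMU is passed through directly. The ordering the comment asks for is preserved: stage 2 is programmed, ISB included, before __activate_traps() clears HCR_EL2.TGE. Assuming the neighbouring lines are unchanged, the sequence becomes:

    /* stage 2 (and its ISB) must be in place first ... */
    __load_guest_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);

    /* ... before TGE is cleared, among other things, here */
    __activate_traps(vcpu);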
@@ -53,7 +53,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
          * place before clearing TGE. __load_guest_stage2() already
          * has an ISB in order to deal with this.
          */
-        __load_guest_stage2(mmu);
+        __load_guest_stage2(mmu, mmu->arch);
         val = read_sysreg(hcr_el2);
         val &= ~HCR_TGE;
         write_sysreg(val, hcr_el2);
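Same idea in the VHE TLB path: mmu->arch is directly usable at EL2, and stage 2 is loaded, with its ISB, before TGE is flipped by hand so that both translation stages are in place for the upcoming TLB operation. A condensed sketch of the resulting sequence, assuming the rest of the function is unchanged:

    /* VHE: the kernel VA is valid at EL2, no kern_hyp_va() needed */
    __load_guest_stage2(mmu, mmu->arch);

    /* only now is it safe to drop TGE for the EL1/EL0 TLB operation */
    val = read_sysreg(hcr_el2);
    val &= ~HCR_TGE;
    write_sysreg(val, hcr_el2);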