@@ -3815,7 +3815,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
- vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+ vcpu->arch.mmu->root_cr3 = kvm_mmu_get_guest_cr3(vcpu, vcpu->arch.mmu);

return 0;
}
@@ -3827,7 +3827,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
gfn_t root_gfn, root_cr3;
int i;

- root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+ root_cr3 = kvm_mmu_get_guest_cr3(vcpu, vcpu->arch.mmu);
root_gfn = root_cr3 >> PAGE_SHIFT;

if (mmu_check_root(vcpu, root_gfn))
@@ -4191,7 +4191,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
arch.gfn = gfn;
arch.direct_map = vcpu->arch.mmu->direct_map;
- arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+ arch.cr3 = kvm_mmu_get_guest_cr3(vcpu, vcpu->arch.mmu);

return kvm_setup_async_pf(vcpu, cr2_or_gpa,
kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
@@ -4453,7 +4453,11 @@ void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);

-static unsigned long get_cr3(struct kvm_vcpu *vcpu)
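+/*
+ * No longer static: kvm_mmu_get_guest_cr3() in mmu.h compares mmu->get_cr3
+ * against this default hook so the retpoline can be skipped for it.
+ */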
+unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
{
return kvm_read_cr3(vcpu);
}
@@ -5040,7 +5040,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
context->direct_map = true;
context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
- context->get_cr3 = get_cr3;
+ context->get_cr3 = get_guest_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;

@@ -5187,7 +5187,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)

kvm_init_shadow_mmu(vcpu);
context->set_cr3 = kvm_x86_ops->set_cr3;
- context->get_cr3 = get_cr3;
+ context->get_cr3 = get_guest_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
@@ -5202,7 +5202,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
return;

g_context->mmu_role.as_u64 = new_role.as_u64;
- g_context->get_cr3 = get_cr3;
+ g_context->get_cr3 = get_guest_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;

@@ -102,6 +102,22 @@ static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
kvm_get_active_pcid(vcpu));
}

+unsigned long get_guest_cr3(struct kvm_vcpu *vcpu);
+
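+/*
+ * Read the guest CR3 for @mmu.  When retpolines are enabled, open code the
+ * common case where the hook is the default get_guest_cr3() so the indirect
+ * call (and its retpoline overhead) is avoided.
+ */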
+static inline unsigned long kvm_mmu_get_guest_cr3(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu)
+{
+ if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_cr3 == get_guest_cr3)
+ return kvm_read_cr3(vcpu);
+
+ return mmu->get_cr3(vcpu);
+}
+
/*
* Currently, we have two sorts of write-protection, a) the first one
* write-protects guest page to sync the guest modification, b) another one is
@@ -315,7 +315,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
walker->level = mmu->root_level;
- pte = mmu->get_cr3(vcpu);
+ pte = kvm_mmu_get_guest_cr3(vcpu, mmu);
have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
@@ -10130,7 +10130,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
return;

if (!vcpu->arch.mmu->direct_map &&
- work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
+ work->arch.cr3 != kvm_mmu_get_guest_cr3(vcpu, vcpu->arch.mmu))
return;

vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);