--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -242,6 +242,7 @@ struct kvm_pio_request {
 struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
+	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
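
With this callback in place, common MMU code asks the active paging
context which root to translate through instead of hardcoding the
architectural register. In sketch form (an illustrative fragment, not
part of the patch):

	/* before: walkers read the architectural register directly */
	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
	/* after: the active mmu context decides what "CR3" means */
	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;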
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2071,7 +2071,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	int direct = 0;
 	u64 pdptr;
 
-	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
 
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -2254,6 +2254,11 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
 	mmu_free_roots(vcpu);
 }
 
+static unsigned long get_cr3(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr3;
+}
+
 static void inject_page_fault(struct kvm_vcpu *vcpu,
 			      u64 addr,
 			      u32 err_code)
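
The get_cr3() default is an identity: it reports the architectural CR3,
so existing contexts behave exactly as before. The value of the
indirection is that a future context can answer differently; a
hypothetical override might look like this (nested_get_cr3 and the
nested_cr3 field are illustrative names, not part of this patch):

	static unsigned long nested_get_cr3(struct kvm_vcpu *vcpu)
	{
		/* hypothetical: report the root of the tables this
		 * context walks, rather than vcpu->arch.cr3 */
		return vcpu->arch.nested_cr3;
	}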
@@ -2399,6 +2404,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->root_hpa = INVALID_PAGE;
 	vcpu->arch.mmu.tdp_enabled = true;
 	vcpu->arch.mmu.set_cr3 = kvm_x86_ops->set_tdp_cr3;
+	vcpu->arch.mmu.get_cr3 = get_cr3;
 
 	if (!is_paging(vcpu)) {
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -2439,6 +2445,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
 	vcpu->arch.mmu.tdp_enabled = false;
 	vcpu->arch.mmu.set_cr3 = kvm_x86_ops->set_cr3;
+	vcpu->arch.mmu.get_cr3 = get_cr3;
 
 	return r;
 }
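
Both init paths install the same default, so the change is meant to be
behavior-neutral: the indirection must return exactly the value the old
direct reads saw. One way to state that invariant (purely illustrative,
not in the patch):

	/* with the default callback installed, the indirection is an
	 * identity, so this check would never fire */
	WARN_ON(vcpu->arch.mmu.get_cr3(vcpu) != vcpu->arch.cr3);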
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -126,7 +126,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 				     fetch_fault);
 walk:
 	walker->level = vcpu->arch.mmu.root_level;
-	pte = vcpu->arch.cr3;
+	pte = vcpu->arch.mmu.get_cr3(vcpu);
 #if PTTYPE == 64
 	if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
 		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
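
On the PT32E branch above, PAE paging selects one of four
page-directory-pointer entries using bits 31:30 of the guest virtual
address, which is what (addr >> 30) & 3 extracts. A worked value:

	/* addr = 0xb7001000: bits 31:30 are 0b10 */
	int idx = (0xb7001000UL >> 30) & 3;	/* idx == 2, PDPTE 2 */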
@@ -137,7 +137,7 @@ walk:
 	}
 #endif
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+	       (vcpu->arch.mmu.get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
 
 	pt_access = ACC_ALL;