@@ -250,6 +250,7 @@ struct kvm_mmu {
void (*free)(struct kvm_vcpu *vcpu);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
u32 *error);
+ gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *error);
void (*prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page);
int (*sync_page)(struct kvm_vcpu *vcpu,
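The walker changes further down rely on a simple contract for the new callback: on success it returns a (possibly different) guest physical address, on failure it returns UNMAPPED_GVA and stores an x86 page-fault error code through *error. A minimal sketch of that calling convention; the wrapper name is illustrative and not part of the patch:

/*
 * Sketch only, not from the patch: the call-and-check idiom the
 * page-table walker uses below. Returns 0 and updates *gpa on success,
 * or -EFAULT with a page-fault error code left in *error.
 */
static int translate_gpa_checked(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				 gpa_t *gpa, u32 *error)
{
	gpa_t t_gpa = mmu->translate_gpa(vcpu, *gpa, error);

	if (t_gpa == UNMAPPED_GVA)
		return -EFAULT;

	*gpa = t_gpa;
	return 0;
}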
@@ -2149,6 +2149,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
spin_unlock(&vcpu->kvm->mmu_lock);
}

+static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *error)
+{
+ return gpa;
+}
+
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access, u32 *error)
{
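translate_gpa() above is the identity mapping that every existing configuration keeps using; the hook only becomes interesting once a second, nested translation exists. A rough sketch of what a non-identity implementation might look like, under the assumption that an L2 guest physical address is resolved through the L1 nested page table by reusing the existing gva_to_gpa callback (the function name and the access bits are assumptions, not part of this patch):

/* Sketch only: resolve an L2 gpa through the L1 nested page table. */
static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *error)
{
	/* Assumption: nested page-table walks count as user-mode accesses. */
	u32 access = PFERR_USER_MASK;

	/* The L2 gpa plays the role of a gva walked through the L1 MMU. */
	return vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, error);
}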
@@ -2399,6 +2404,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;
context->free = nonpaging_free;
+ context->translate_gpa = translate_gpa;
context->prefetch_page = nonpaging_prefetch_page;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -2443,6 +2449,7 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
else
r = paging32_init_context(vcpu, context);
+ vcpu->arch.mmu.translate_gpa = translate_gpa;
vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
vcpu->arch.mmu.tdp_enabled = false;
@@ -122,6 +122,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
unsigned index, pt_access, pte_access;
gpa_t pte_gpa;
int rsvd_fault = 0;
+ u32 error;
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
@@ -150,6 +151,15 @@ walk:
table_gfn = gpte_to_gfn(pte);
pte_gpa = gfn_to_gpa(table_gfn);
pte_gpa += index * sizeof(pt_element_t);
+
+ pte_gpa = mmu->translate_gpa(vcpu, pte_gpa, &error);
+ if (pte_gpa == UNMAPPED_GVA) {
+ walker->error_code = error;
+ return 0;
+ }
+ /* pte_gpa might have changed - recalculate table_gfn */
+ table_gfn = gpa_to_gfn(pte_gpa);
+
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
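The same translate-and-recheck sequence guards the final gfn a few lines further down, so it could also be factored into a small helper under the template's FNAME() convention; the helper below is a sketch, not part of the patch. Recomputing table_gfn matters because the nested translation preserves the offset within the page but will usually move the access to a different frame.

/*
 * Sketch only: translate *gpa through the MMU hook and refresh the
 * corresponding gfn. Returns true on success; on failure the walker's
 * error_code is set and the caller should abort the walk.
 */
static bool FNAME(translate_table_gpa)(struct kvm_vcpu *vcpu,
				       struct kvm_mmu *mmu,
				       struct guest_walker *walker,
				       gpa_t *gpa, gfn_t *gfn)
{
	u32 error;
	gpa_t t_gpa = mmu->translate_gpa(vcpu, *gpa, &error);

	if (t_gpa == UNMAPPED_GVA) {
		walker->error_code = error;
		return false;
	}

	*gpa = t_gpa;
	*gfn = gpa_to_gfn(t_gpa);	/* the frame may have changed */
	return true;
}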
@@ -209,6 +219,15 @@ walk:
is_cpuid_PSE36())
walker->gfn += pse36_gfn_delta(pte);
+ /* Do the final translation */
+ pte_gpa = gfn_to_gpa(walker->gfn);
+ pte_gpa = mmu->translate_gpa(vcpu, pte_gpa, &error);
+ if (pte_gpa == UNMAPPED_GVA) {
+ walker->error_code = error;
+ return 0;
+ }
+ walker->gfn = gpa_to_gfn(pte_gpa);
+
break;
}
@@ -503,6 +503,11 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn)
return (gpa_t)gfn << PAGE_SHIFT;
}
+static inline gfn_t gpa_to_gfn(gpa_t gpa)
+{
+ return (gfn_t)gpa >> PAGE_SHIFT;
+}
+
static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
return (hpa_t)pfn << PAGE_SHIFT;
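gpa_to_gfn() undoes gfn_to_gpa() and discards the page offset in the process, which is exactly what the walker needs after translating a pte address. The pair also combines naturally with the new hook for ordinary guest-memory reads; a sketch of such a helper follows, with the name and exact signature being assumptions rather than part of the patch (it is x86-specific, so it would live next to the other MMU helpers rather than in generic code):

/*
 * Sketch only: read from a guest frame, letting the MMU's translate_gpa
 * hook redirect the access first (an identity operation with the default
 * handler installed above).
 */
static int kvm_read_guest_page_translated(struct kvm_vcpu *vcpu, gfn_t gfn,
					   void *data, int offset, int len,
					   u32 *error)
{
	gpa_t gpa = vcpu->arch.mmu.translate_gpa(vcpu, gfn_to_gpa(gfn), error);

	if (gpa == UNMAPPED_GVA)
		return -EFAULT;

	return kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(gpa), data, offset, len);
}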