@@ -2256,6 +2256,86 @@ static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
}

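+/*
+ * Common INVLPG handler for 32-bit and 64-bit shadow paging, replacing
+ * the paging64_invlpg/paging32_invlpg template instantiations: walk the
+ * shadow page table for @gva, zap the leaf spte if one maps it, then
+ * let kvm_mmu_pte_write() re-read the guest pte and remap it.
+ */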
+static void paging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+{
+ struct kvm_shadow_walk_iterator iterator;
+ gpa_t pte_gpa = -1;
+ int level;
+ u64 *sptep;
+ int need_flush = 0;
+ unsigned pte_size = 0;
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+
+ for_each_shadow_entry(vcpu, gva, iterator) {
+ level = iterator.level;
+ sptep = iterator.sptep;
+
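+ /* Leaf spte reached: either a last-level pte or a large-page mapping. */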
+ if (level == PT_PAGE_TABLE_LEVEL ||
+ (level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep)) ||
+ (level == PT_PDPE_LEVEL && is_large_pte(*sptep))) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ int offset = 0;
+
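+ /*
+ * A non-PAE guest page table holds 1024 4-byte ptes, twice what a
+ * 512-entry shadow page covers; role.quadrant selects which half
+ * this shadow page mirrors, so fold it into the guest pte offset.
+ */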
+ if (!sp->role.cr4_pae)
+ offset = sp->role.quadrant << PT64_LEVEL_BITS;
+ pte_size = sp->role.cr4_pae ? 8 : 4;
+ pte_gpa = (sp->gfn << PAGE_SHIFT);
+ pte_gpa += (sptep - sp->spt + offset) * pte_size;
+
+ if (is_shadow_present_pte(*sptep)) {
+ rmap_remove(vcpu->kvm, sptep);
+ if (is_large_pte(*sptep))
+ --vcpu->kvm->stat.lpages;
+ need_flush = 1;
+ }
+ __set_spte(sptep, shadow_trap_nonpresent_pte);
+ break;
+ }
+
+ if (!is_shadow_present_pte(*sptep))
+ break;
+ }
+
+ if (need_flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+
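+ /*
+ * Bump invlpg_counter so that kvm_mmu_pte_write(), which samples it
+ * before reading the guest pte without mmu_lock held, can detect
+ * this invlpg and discard the possibly stale pte it read.
+ */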
+ atomic_inc(&vcpu->kvm->arch.invlpg_counter);
+
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+ if (pte_gpa == -1)
+ return;
+
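+ /*
+ * Speculatively remap the invalidated pte: a NULL 'new' pointer lets
+ * kvm_mmu_pte_write() fetch the guest pte at pte_gpa itself.  Guests
+ * often execute invlpg right after updating a pte, so the mapping is
+ * likely to be reused soon.  Skip this if the mmu memory caches
+ * cannot be topped up.
+ */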
+ if (mmu_topup_memory_caches(vcpu))
+ return;
+ kvm_mmu_pte_write(vcpu, pte_gpa, NULL, pte_size, 0);
+}
+
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE
@@ -2335,7 +2415,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
context->gva_to_gpa = paging64_gva_to_gpa;
context->prefetch_page = paging64_prefetch_page;
context->sync_page = paging64_sync_page;
- context->invlpg = paging64_invlpg;
+ context->invlpg = paging_invlpg;
context->free = paging_free;
context->root_level = level;
context->shadow_root_level = level;
@@ -2360,7 +2440,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
context->free = paging_free;
context->prefetch_page = paging32_prefetch_page;
context->sync_page = paging32_sync_page;
- context->invlpg = paging32_invlpg;
+ context->invlpg = paging_invlpg;
context->root_level = PT32_ROOT_LEVEL;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->root_hpa = INVALID_PAGE;
@@ -460,62 +460,6 @@ out_unlock:
return 0;
}

-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
-{
- struct kvm_shadow_walk_iterator iterator;
- gpa_t pte_gpa = -1;
- int level;
- u64 *sptep;
- int need_flush = 0;
-
- spin_lock(&vcpu->kvm->mmu_lock);
-
- for_each_shadow_entry(vcpu, gva, iterator) {
- level = iterator.level;
- sptep = iterator.sptep;
-
- if (level == PT_PAGE_TABLE_LEVEL ||
- ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
- ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
- struct kvm_mmu_page *sp = page_header(__pa(sptep));
- int offset = 0;
-
- if (PTTYPE == 32)
- offset = sp->role.quadrant << PT64_LEVEL_BITS;;
-
- pte_gpa = (sp->gfn << PAGE_SHIFT);
- pte_gpa += (sptep - sp->spt + offset) *
- sizeof(pt_element_t);
-
- if (is_shadow_present_pte(*sptep)) {
- rmap_remove(vcpu->kvm, sptep);
- if (is_large_pte(*sptep))
- --vcpu->kvm->stat.lpages;
- need_flush = 1;
- }
- __set_spte(sptep, shadow_trap_nonpresent_pte);
- break;
- }
-
- if (!is_shadow_present_pte(*sptep))
- break;
- }
-
- if (need_flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
-
- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-
- spin_unlock(&vcpu->kvm->mmu_lock);
-
- if (pte_gpa == -1)
- return;
-
- if (mmu_topup_memory_caches(vcpu))
- return;
- kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
-}
-
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
u32 *error)
{