diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1680,11 +1680,8 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-static void kvm_mmu_free_sp(struct kvm_mmu_page *sp)
+void kvm_mmu_free_sp(struct kvm_mmu_page *sp)
 {
-	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
-	hlist_del(&sp->hash_link);
-	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
 	if (!sp->role.direct)
 		free_page((unsigned long)sp->gfns);
@@ -2505,6 +2502,9 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
 		WARN_ON(!sp->role.invalid || sp->root_count);
+		MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
+		hlist_del(&sp->hash_link);
+		list_del(&sp->link);
 		kvm_mmu_free_sp(sp);
 	}
 }
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -162,4 +162,6 @@ void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
 struct kvm_mmu_page *kvm_mmu_alloc_direct_sp_for_split(gfp_t gfp);
 
+void kvm_mmu_free_sp(struct kvm_mmu_page *sp);
+
 #endif /* __KVM_X86_MMU_INTERNAL_H */
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -59,8 +59,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
 {
-	free_page((unsigned long)sp->spt);
-	kmem_cache_free(mmu_page_header_cache, sp);
+	kvm_mmu_free_sp(sp);
 }
 
 /*
Use a common function to free kvm_mmu_page structs in the TDP MMU and
the shadow MMU. This reduces the amount of duplicate code and is needed
in subsequent commits that allocate and free kvm_mmu_pages for eager
page splitting.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 8 ++++----
 arch/x86/kvm/mmu/mmu_internal.h | 2 ++
 arch/x86/kvm/mmu/tdp_mmu.c      | 3 +--
 3 files changed, 7 insertions(+), 6 deletions(-)
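
For reference, a minimal sketch of the consolidated free path as it reads
after this patch. The tail of kvm_mmu_free_sp() is not visible in the mmu.c
hunk above; the kmem_cache_free() call is assumed here from the removed
tdp_mmu_free_sp() body, which returned the header struct to
mmu_page_header_cache.

	/*
	 * Sketch only, not part of the patch. Frees the page-table page
	 * and, for indirect pages, the gfns array; the kmem_cache_free()
	 * tail is assumed (see note above).
	 */
	void kvm_mmu_free_sp(struct kvm_mmu_page *sp)
	{
		free_page((unsigned long)sp->spt);
		if (!sp->role.direct)
			free_page((unsigned long)sp->gfns);
		kmem_cache_free(mmu_page_header_cache, sp);
	}

	/* The TDP MMU free path becomes a thin wrapper around the helper. */
	static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
	{
		kvm_mmu_free_sp(sp);
	}

The shadow-MMU-specific work (the empty-page assertion and unlinking from
the hash list and active list) stays with the shadow MMU, moving to
kvm_mmu_commit_zap_page() ahead of the call to the common helper.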