diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3154,8 +3154,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
- if (is_tdp_mmu_page(sp) && kvm_tdp_mmu_put_root(kvm, sp))
- kvm_tdp_mmu_free_root(kvm, sp);
+ if (is_tdp_mmu_page(sp))
+ kvm_tdp_mmu_put_root(kvm, sp);
else if (!--sp->root_count && sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -41,10 +41,31 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
rcu_barrier();
}
-static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ gfn_t start, gfn_t end, bool can_yield);
+
+static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
- if (kvm_tdp_mmu_put_root(kvm, root))
- kvm_tdp_mmu_free_root(kvm, root);
+ free_page((unsigned long)sp->spt);
+ kmem_cache_free(mmu_page_header_cache, sp);
+}
+
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+ gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ if (--root->root_count)
+ return;
+
+ WARN_ON(!root->tdp_mmu_page);
+
+ list_del(&root->link);
+
+ zap_gfn_range(kvm, root, 0, max_gfn, false);
+
+ tdp_mmu_free_sp(root);
}
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
@@ -66,7 +87,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *next_root;
next_root = list_next_entry(root, link);
- tdp_mmu_put_root(kvm, root);
+ kvm_tdp_mmu_put_root(kvm, root);
return next_root;
}
@@ -85,31 +106,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
#define for_each_tdp_mmu_root(_kvm, _root) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield);
-
-static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
-{
- free_page((unsigned long)sp->spt);
- kmem_cache_free(mmu_page_header_cache, sp);
-}
-
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
-{
- gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
- lockdep_assert_held_write(&kvm->mmu_lock);
-
- WARN_ON(root->root_count);
- WARN_ON(!root->tdp_mmu_page);
-
- list_del(&root->link);
-
- zap_gfn_range(kvm, root, 0, max_gfn, false);
-
- tdp_mmu_free_sp(root);
-}
-
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
int level)
{
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -6,7 +6,6 @@
#include <linux/kvm_host.h>
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
struct kvm_mmu_page *root)
@@ -17,14 +16,7 @@ static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
++root->root_count;
}
-static inline bool kvm_tdp_mmu_put_root(struct kvm *kvm,
- struct kvm_mmu_page *root)
-{
- lockdep_assert_held(&kvm->mmu_lock);
- --root->root_count;
-
- return !root->root_count;
-}
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
kvm_tdp_mmu_put_root and kvm_tdp_mmu_free_root are always called together, so
merge the functions to simplify TDP MMU root refcounting / freeing.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c     |  4 +--
 arch/x86/kvm/mmu/tdp_mmu.c | 54 ++++++++++++++++++--------------------
 arch/x86/kvm/mmu/tdp_mmu.h | 10 +------
 3 files changed, 28 insertions(+), 40 deletions(-)
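Not part of the patch: below is a minimal, self-contained userspace sketch of
the refcounting model this change adopts, for readers skimming the diff. All
names in it (toy_root, toy_put_root, toy_teardown) are illustrative and do not
exist in KVM; toy_teardown stands in for the list_del / zap_gfn_range /
tdp_mmu_free_sp sequence that the merged kvm_tdp_mmu_put_root now performs
once the last reference is dropped.

/*
 * Illustrative sketch only. Before the patch, callers had to check the
 * return value of put() and call a separate free(); after it, put() itself
 * tears the object down when the reference count reaches zero.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_root {
	int root_count;		/* analogous to sp->root_count */
};

static void toy_teardown(struct toy_root *root)
{
	/* stands in for list_del() + zap_gfn_range() + tdp_mmu_free_sp() */
	printf("tearing down and freeing root %p\n", (void *)root);
	free(root);
}

/* Merged model: dropping the last reference also frees the root. */
static void toy_put_root(struct toy_root *root)
{
	if (--root->root_count)
		return;

	toy_teardown(root);
}

int main(void)
{
	struct toy_root *root = calloc(1, sizeof(*root));

	if (!root)
		return 1;

	root->root_count = 2;	/* e.g. a vCPU reference plus an iterator reference */

	toy_put_root(root);	/* still referenced, nothing freed */
	toy_put_root(root);	/* last reference dropped: teardown happens here */

	return 0;
}

With put() owning the free, callers such as mmu_free_root_page and
tdp_mmu_next_root no longer need to check a return value and invoke a
separate free function, which is exactly the simplification the diff above
makes.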