diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -512,7 +512,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
* state bits, it is used to clear the last level sptep.
* Returns the old PTE.
*/
-static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
+u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
{
kvm_pfn_t pfn;
u64 old_spte = *sptep;
@@ -855,42 +855,6 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
return slot;
}
 
-static void kvm_zap_one_rmap_spte(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head, u64 *sptep)
-{
- mmu_spte_clear_track_bits(kvm, sptep);
- pte_list_remove(sptep, rmap_head);
-}
-
-/* Return true if at least one SPTE was zapped, false otherwise */
-static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head)
-{
- struct pte_list_desc *desc, *next;
- int i;
-
- if (!rmap_head->val)
- return false;
-
- if (!(rmap_head->val & 1)) {
- mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
- goto out;
- }
-
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-
- for (; desc; desc = next) {
- for (i = 0; i < desc->spte_count; i++)
- mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
- next = desc->more;
- free_pte_list_desc(desc);
- }
-out:
- /* rmap_head is meaningless now, remember to reset it */
- rmap_head->val = 0;
- return true;
-}
-
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
@@ -1145,19 +1109,6 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}
 
-static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- const struct kvm_memory_slot *slot)
-{
- return kvm_zap_all_rmap_sptes(kvm, rmap_head);
-}
-
-static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- pte_t unused)
-{
- return __kvm_zap_rmap(kvm, rmap_head, slot);
-}
-
static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
pte_t pte)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -319,4 +319,5 @@ void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
+u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep);
 
#endif /* __KVM_X86_MMU_INTERNAL_H */
diff --git a/arch/x86/kvm/mmu/rmap.c b/arch/x86/kvm/mmu/rmap.c
--- a/arch/x86/kvm/mmu/rmap.c
+++ b/arch/x86/kvm/mmu/rmap.c
@@ -56,7 +56,7 @@ int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
return count;
}
 
-void free_pte_list_desc(struct pte_list_desc *pte_list_desc)
+static void free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}
@@ -283,3 +283,51 @@ void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
 
rmap_walk_init_level(iterator, iterator->level);
}
+
+void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ u64 *sptep)
+{
+ mmu_spte_clear_track_bits(kvm, sptep);
+ pte_list_remove(sptep, rmap_head);
+}
+
+/* Return true if at least one SPTE was zapped, false otherwise */
+bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+{
+ struct pte_list_desc *desc, *next;
+ int i;
+
+ if (!rmap_head->val)
+ return false;
+
+ if (!(rmap_head->val & 1)) {
+ mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
+ goto out;
+ }
+
+ desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+
+ for (; desc; desc = next) {
+ for (i = 0; i < desc->spte_count; i++)
+ mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
+ next = desc->more;
+ free_pte_list_desc(desc);
+ }
+out:
+ /* rmap_head is meaningless now, remember to reset it */
+ rmap_head->val = 0;
+ return true;
+}
+
+bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ const struct kvm_memory_slot *slot)
+{
+ return kvm_zap_all_rmap_sptes(kvm, rmap_head);
+}
+
+bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn, int level,
+ pte_t unused)
+{
+ return __kvm_zap_rmap(kvm, rmap_head, slot);
+}
diff --git a/arch/x86/kvm/mmu/rmap.h b/arch/x86/kvm/mmu/rmap.h
--- a/arch/x86/kvm/mmu/rmap.h
+++ b/arch/x86/kvm/mmu/rmap.h
@@ -27,7 +27,6 @@ static struct kmem_cache *pte_list_desc_cache;
 
int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
struct kvm_rmap_head *rmap_head);
-void free_pte_list_desc(struct pte_list_desc *pte_list_desc);
void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head);
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
 
@@ -90,4 +89,12 @@ typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn,
int level, pte_t pte);
 
+void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ u64 *sptep);
+bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ const struct kvm_memory_slot *slot);
+bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn, int level,
+ pte_t unused);
#endif /* __KVM_X86_MMU_RMAP_H */
Move the various rmap zap functions to rmap.c. These functions are less
"pure" rmap operations in that they also contain some SPTE manipulation;
however, they're mostly about rmap / pte list manipulation.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 51 +--------------------------------
 arch/x86/kvm/mmu/mmu_internal.h |  1 +
 arch/x86/kvm/mmu/rmap.c         | 50 +++++++++++++++++++++++++++++++-
 arch/x86/kvm/mmu/rmap.h         |  9 +++++-
 4 files changed, 59 insertions(+), 52 deletions(-)
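
A note for readers new to this code, in case the tagged-pointer test in
kvm_zap_all_rmap_sptes() looks cryptic: rmap_head->val either holds a
single SPTE pointer directly (low bit clear) or, with the low bit set,
points to a chain of pte_list_desc structures. The userspace sketch
below mirrors only that control flow; the struct layouts, DESC_CAPACITY,
clear_spte() and zap_all() are simplified stand-ins invented for
illustration, not the kernel's definitions.

/*
 * Userspace sketch (illustration only, not kernel code) of the
 * rmap_head encoding walked by kvm_zap_all_rmap_sptes():
 *
 *   val == 0            -> no SPTEs are rmapped for this gfn.
 *   low bit of val == 0 -> val is a pointer to a single SPTE.
 *   low bit of val == 1 -> val, with the bit masked off, points to a
 *                          pte_list_desc: a linked list of SPTE arrays.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_CAPACITY 14	/* stand-in for the kernel's per-desc capacity */

struct pte_list_desc {
	struct pte_list_desc *more;	/* next desc in the chain */
	int spte_count;			/* used slots in sptes[] */
	uint64_t *sptes[DESC_CAPACITY];
};

struct kvm_rmap_head {
	unsigned long val;
};

/* Stand-in for mmu_spte_clear_track_bits(): just zero the SPTE. */
static void clear_spte(uint64_t *sptep)
{
	*sptep = 0;
}

/* Same control flow as kvm_zap_all_rmap_sptes() in the patch above. */
static int zap_all(struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc, *next;
	int i;

	if (!rmap_head->val)
		return 0;

	if (!(rmap_head->val & 1)) {
		/* Single SPTE: val is the SPTE pointer itself. */
		clear_spte((uint64_t *)rmap_head->val);
		goto out;
	}

	/* Many SPTEs: mask the tag bit off to reach the desc chain. */
	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	for (; desc; desc = next) {
		for (i = 0; i < desc->spte_count; i++)
			clear_spte(desc->sptes[i]);
		next = desc->more;
		free(desc);
	}
out:
	rmap_head->val = 0;
	return 1;
}

int main(void)
{
	uint64_t spte = 0x8000000000000fffull;
	struct kvm_rmap_head head = { .val = (unsigned long)&spte };

	printf("zapped: %d, spte now: %#llx\n", zap_all(&head),
	       (unsigned long long)spte);
	return 0;
}

The scheme works because SPTE pointers are at least 8-byte aligned, so
the low bit is always free to serve as the tag, and the common
single-mapping case costs no extra allocation.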
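
Relatedly, the __kvm_zap_rmap()/kvm_zap_rmap() pair looks redundant but
is not: kvm_zap_rmap() has to match the rmap_handler_t signature, unused
pte_t argument and all, so the generic slot rmap walkers can invoke it.
The sketch below shows only that callback shape; walk_rmaps() and the
trimmed-down types are invented for illustration and are not the
kernel's walker.

#include <stdbool.h>
#include <stddef.h>

/* Opaque stand-ins for the kernel types used by the handler. */
struct kvm;
struct kvm_memory_slot;
struct kvm_rmap_head { unsigned long val; };
typedef unsigned long long gfn_t;
typedef struct { unsigned long pte; } pte_t;

/* Mirrors the rmap_handler_t typedef from rmap.h in the patch. */
typedef bool (*rmap_handler_t)(struct kvm *kvm,
			       struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level, pte_t pte);

/*
 * Invented walker: apply one handler to each rmap head in a range and
 * report whether any invocation zapped something (i.e. needs a flush).
 */
static bool walk_rmaps(struct kvm *kvm, struct kvm_memory_slot *slot,
		       struct kvm_rmap_head *heads, size_t nr,
		       gfn_t first_gfn, int level, rmap_handler_t handler)
{
	bool flush = false;
	size_t i;

	for (i = 0; i < nr; i++)
		flush |= handler(kvm, &heads[i], slot, first_gfn + i,
				 level, (pte_t){ 0 });
	return flush;
}

Handlers that don't need the extra arguments, like kvm_zap_rmap(),
simply ignore them; the uniform signature is what lets one walker drive
zapping and kvm_set_pte_rmap()-style updates alike.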