@@ -4520,6 +4520,36 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}
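
+ /*
+ * Zap rmap entries covering the gfn range [gfn_start, gfn_end) in every
+ * memslot, at every page-size level, flushing remote TLBs as needed.
+ */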
+void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+
+ slots = kvm_memslots(kvm);
+
+ spin_lock(&kvm->mmu_lock);
+ kvm_for_each_memslot(memslot, slots) {
+ gfn_t start, end;
+
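+ /* Clamp the zap range to the part that overlaps this memslot. */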
+ start = max(gfn_start, memslot->base_gfn);
+ end = min(gfn_end, memslot->base_gfn + memslot->npages);
+ if (start >= end)
+ continue;
+
+ slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+ PT_PAGE_TABLE_LEVEL,
+ PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1,
+ start, end - 1, true);
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+}
+
static bool slot_rmap_write_protect(struct kvm *kvm, unsigned long *rmapp)
{
return __rmap_write_protect(kvm, rmapp, false);
@@ -170,4 +170,5 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
+void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
#endif