@@ -1124,6 +1124,12 @@ struct kvm_arch {
*/
spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */
+
+ /*
+ * If set, rmaps have been allocated for all memslots and should be
+ * allocated for any newly created or modified memslots.
+ */
+ bool memslots_have_rmaps;
};

struct kvm_vm_stat {
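The flag lives in struct kvm_arch rather than struct kvm because rmaps are an
x86-specific memslot detail, and it lets other MMU paths gate rmap walks on a
single bool. A minimal sketch of a read-side wrapper, assuming a hypothetical
kvm_memslots_have_rmaps() name (this hunk only adds the raw field):

/*
 * Sketch: read-side helper for the new flag. The name is an assumption
 * for illustration. If the flag were ever flipped after VM creation, the
 * read would also need ordering (e.g. an acquire pairing with the
 * writer), which a plain load does not provide.
 */
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return kvm->arch.memslots_have_rmaps;
}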
@@ -5469,6 +5469,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)

kvm_mmu_init_tdp_mmu(kvm);

+ kvm->arch.memslots_have_rmaps = true;
+
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
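Setting the flag unconditionally in kvm_mmu_init_vm() makes this patch a
functional no-op: every VM still gets rmaps for every memslot. The value of
routing the decision through a flag is that VM init can later skip rmap
allocation when rmaps are not needed. A hedged sketch of that shape, assuming
kvm_mmu_init_tdp_mmu() were changed to report whether the TDP MMU is in use
(no such change is part of this diff):

	/*
	 * Sketch only: a VM using the TDP MMU could defer rmap
	 * allocation; the bool return is an assumed follow-up change.
	 */
	if (!kvm_mmu_init_tdp_mmu(kvm))
		kvm->arch.memslots_have_rmaps = true;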
@@ -10935,7 +10935,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
return 0;
}

-static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
+static int kvm_alloc_memslot_metadata(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages)
{
int i;
@@ -10948,9 +10949,11 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
*/
memset(&slot->arch, 0, sizeof(slot->arch));

- r = memslot_rmap_alloc(slot, npages);
- if (r)
- return r;
+ if (kvm->arch.memslots_have_rmaps) {
+ r = memslot_rmap_alloc(slot, npages);
+ if (r)
+ return r;
+ }

for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
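For orientation, memslot_rmap_alloc() (named in the hunk header above but not
shown in full) allocates one reverse-map array per supported page size. A
simplified sketch, assuming the kvcalloc()/gfn_to_index() shape used elsewhere
in this file; the exact body is not part of this diff:

/*
 * Sketch: one rmap array per page size (4K, 2M, 1G on x86). Error
 * handling is simplified; a real version would also free any partial
 * allocations before returning -ENOMEM.
 */
static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
			      unsigned long npages)
{
	const int sz = sizeof(*slot->arch.rmap[0]);
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		int level = i + 1;
		int lpages = gfn_to_index(slot->base_gfn + npages - 1,
					  slot->base_gfn, level) + 1;

		slot->arch.rmap[i] = kvcalloc(lpages, sz,
					      GFP_KERNEL_ACCOUNT);
		if (!slot->arch.rmap[i])
			return -ENOMEM;
	}
	return 0;
}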
@@ -11021,7 +11024,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
- return kvm_alloc_memslot_metadata(memslot,
+ return kvm_alloc_memslot_metadata(kvm, memslot,
mem->memory_size >> PAGE_SHIFT);
return 0;
}
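With kvm plumbed into kvm_alloc_memslot_metadata(), any slot created or moved
while the flag is set gets rmaps, and slots created while it is clear do not.
The missing piece for making the flag dynamic is a backfill path that flips it
and allocates rmaps for already-existing memslots. A hedged sketch of what
that could look like (function name, locking, and iteration details are
assumptions, not part of this diff):

/*
 * Sketch: activate rmaps for a running VM. Backfill every existing
 * memslot, then set the flag so future slots allocate rmaps in
 * kvm_alloc_memslot_metadata(). A real version would need a lock to
 * serialize against concurrent memslot updates, plus ordering so other
 * CPUs observe the rmap pointers before they observe the flag.
 */
static int alloc_all_memslots_rmaps(struct kvm *kvm)
{
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int r, i;

	if (kvm->arch.memslots_have_rmaps)
		return 0;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(slot, slots) {
			r = memslot_rmap_alloc(slot, slot->npages);
			if (r)
				return r;
		}
	}

	kvm->arch.memslots_have_rmaps = true;
	return 0;
}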