@@ -431,11 +431,11 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
  */
 struct kvm_memslots {
 	u64 generation;
-	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
 	atomic_t lru_slot;
 	int used_slots;
+	struct kvm_memory_slot memslots[];
 };
 
 struct kvm {
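
Moving memslots[] from a fixed KVM_MEM_SLOTS_NUM-sized array to a flexible
array member at the tail of the struct is what lets each kvm_memslots
allocation be sized for only the slots actually in use. The sizing arithmetic
can be seen in a minimal userspace sketch; the types below are simplified
stand-ins for illustration, not the real kernel structures:

	#include <stdlib.h>

	/* Simplified stand-ins; layout is illustrative only. */
	struct demo_slot { unsigned long base_gfn, npages; short id; };

	struct demo_memslots {
		unsigned long long generation;
		short id_to_index[512];
		int used_slots;
		struct demo_slot slots[];	/* flexible array member, must be last */
	};

	/* Allocate a container sized for exactly 'nslots' trailing entries. */
	static struct demo_memslots *demo_alloc(int nslots)
	{
		return calloc(1, sizeof(struct demo_memslots) +
				 nslots * sizeof(struct demo_slot));
	}

With used_slots == 0 the trailing array contributes no bytes, so an empty set
costs only sizeof(struct demo_memslots), whereas the old layout always paid
for KVM_MEM_SLOTS_NUM entries up front.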
@@ -566,7 +566,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
 		return NULL;
 
 	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-		slots->id_to_index[i] = slots->memslots[i].id = -1;
+		slots->id_to_index[i] = -1;
 
 	return slots;
 }
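
Since the flexible array starts out empty, kvm_alloc_memslots() only needs to
poison the id_to_index table; there are no memslot entries left to stamp with
an invalid id. Lookups by slot id go through that table, roughly along the
lines of this hypothetical helper (a sketch reusing the stand-in types above,
not the kernel's actual id_to_memslot()):

	/* Hypothetical id-to-slot lookup mirroring the id_to_index scheme. */
	static struct demo_slot *demo_id_to_slot(struct demo_memslots *ms, int id)
	{
		int index = ms->id_to_index[id];	/* -1 means "no slot with this id" */

		if (index < 0)
			return NULL;
		return &ms->slots[index];
	}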
@@ -1078,6 +1078,32 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 	return old_memslots;
 }
 
+/*
+ * Note, at a minimum, the current number of used slots must be allocated, even
+ * when deleting a memslot, as we need a complete duplicate of the memslots for
+ * use when invalidating a memslot prior to deleting/moving the memslot.
+ */
+static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
+					     enum kvm_mr_change change)
+{
+	struct kvm_memslots *slots;
+	size_t old_size, new_size;
+
+	old_size = sizeof(struct kvm_memslots) +
+		   (sizeof(struct kvm_memory_slot) * old->used_slots);
+
+	if (change == KVM_MR_CREATE)
+		new_size = old_size + sizeof(struct kvm_memory_slot);
+	else
+		new_size = old_size;
+
+	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
+	if (likely(slots))
+		memcpy(slots, old, old_size);
+
+	return slots;
+}
+
 static int kvm_set_memslot(struct kvm *kvm,
 			   const struct kvm_userspace_memory_region *mem,
 			   struct kvm_memory_slot *old,
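
With kvm_dup_memslots() in place, kvm_set_memslot() below duplicates the
current memslots instead of allocating and memcpy'ing a full-sized struct.
Only KVM_MR_CREATE needs room for one extra entry, and because the copy covers
just old_size bytes, that extra trailing entry is left zero-initialized by
kvzalloc(). A small userspace analogue of the same logic, again using the
stand-in types from the first sketch (illustrative only):

	#include <string.h>

	/* Duplicate 'old', optionally with space for one additional slot. */
	static struct demo_memslots *demo_dup(struct demo_memslots *old, int creating)
	{
		size_t old_size = sizeof(*old) +
				  old->used_slots * sizeof(struct demo_slot);
		size_t new_size = creating ? old_size + sizeof(struct demo_slot)
					   : old_size;
		struct demo_memslots *copy = calloc(1, new_size);

		if (copy)
			memcpy(copy, old, old_size);	/* new entry, if any, stays zeroed */
		return copy;
	}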
@@ -1088,10 +1114,9 @@ static int kvm_set_memslot(struct kvm *kvm,
 	struct kvm_memslots *slots;
 	int r;
 
-	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
+	slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
 	if (!slots)
 		return -ENOMEM;
-	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
 
 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
 		/*