diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4377,7 +4377,7 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
slots = kvm_memslots(kvm);
memslot = id_to_memslot(slots, log->slot);
r = -ENOENT;
- if (!memslot->dirty_bitmap)
+ if (!memslot || !memslot->dirty_bitmap)
goto out;
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9320,9 +9320,9 @@ void kvm_arch_sync_events(struct kvm *kvm)
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int i, r;
- unsigned long hva;
+ unsigned long hva, uninitialized_var(old_npages);
struct kvm_memslots *slots = kvm_memslots(kvm);
- struct kvm_memory_slot *slot, old;
+ struct kvm_memory_slot *slot;
/* Called with kvm->slots_lock held. */
if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
@@ -9330,7 +9330,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
slot = id_to_memslot(slots, id);
if (size) {
- if (slot->npages)
+ if (slot && slot->npages)
return -EEXIST;
/*
@@ -9342,13 +9342,13 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
if (IS_ERR((void *)hva))
return PTR_ERR((void *)hva);
} else {
- if (!slot->npages)
+ if (!slot || !slot->npages)
return 0;
- hva = 0;
+ hva = slot->userspace_addr;
+ old_npages = slot->npages;
}
- old = *slot;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_userspace_memory_region m;
@@ -9363,7 +9363,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
}
if (!size)
- vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+ vm_munmap(hva, old_npages * PAGE_SIZE);
return 0;
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -580,10 +580,11 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
BUG();
}
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
- memslot++)
+#define kvm_for_each_memslot(memslot, slots) \
+ for (memslot = &slots->memslots[0]; \
+ memslot < slots->memslots + slots->used_slots; memslot++) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -643,12 +644,15 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
-static inline struct kvm_memory_slot *
-id_to_memslot(struct kvm_memslots *slots, int id)
+static inline
+struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
+ if (index < 0)
+ return NULL;
+
slot = &slots->memslots[index];
WARN_ON(slot->id != id);
@@ -993,6 +997,8 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
* used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
* gfn_to_memslot() itself isn't here as an inline because that would
* bloat other code too much.
+ *
+ * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
*/
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1536,8 +1536,13 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
- phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+ phys_addr_t start, end;
+
+ if (WARN_ON_ONCE(!memslot))
+ return;
+
+ start = memslot->base_gfn << PAGE_SHIFT;
+ end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
stage2_wp_range(kvm, start, end);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -535,7 +535,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
return NULL;
for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
- slots->id_to_index[i] = slots->memslots[i].id = i;
+ slots->id_to_index[i] = slots->memslots[i].id = -1;
return slots;
}
@@ -793,64 +793,94 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
return 0;
}
+static inline void kvm_shift_memslots_deleted(struct kvm_memslots *slots,
+ struct kvm_memory_slot *new)
+{
+ struct kvm_memory_slot *mslots = slots->memslots;
+ int i;
+
+ if (WARN_ON(slots->id_to_index[new->id] == -1))
+ return;
+
+ slots->used_slots--;
+
+ for (i = slots->id_to_index[new->id]; i < slots->used_slots; i++) {
+ mslots[i] = mslots[i + 1];
+ slots->id_to_index[mslots[i].id] = i;
+ }
+ mslots[i] = *new;
+ slots->id_to_index[new->id] = -1;
+}
+
+static inline int kvm_shift_memslots_forward(struct kvm_memslots *slots,
+ struct kvm_memory_slot *new)
+{
+ struct kvm_memory_slot *mslots = slots->memslots;
+ int i;
+
+ if (WARN_ON_ONCE(slots->id_to_index[new->id] == -1) ||
+ WARN_ON_ONCE(!slots->used_slots))
+ return -1;
+
+ for (i = slots->id_to_index[new->id]; i < slots->used_slots - 1; i++) {
+ if (new->base_gfn > mslots[i + 1].base_gfn)
+ break;
+
+ WARN_ON_ONCE(new->base_gfn == mslots[i + 1].base_gfn);
+
+ /* Shift the next memslot forward one and update its index. */
+ mslots[i] = mslots[i + 1];
+ slots->id_to_index[mslots[i].id] = i;
+ }
+ return i;
+}
+
+static inline int kvm_shift_memslots_back(struct kvm_memslots *slots,
+ struct kvm_memory_slot *new,
+ int start)
+{
+ struct kvm_memory_slot *mslots = slots->memslots;
+ int i;
+
+ for (i = start; i > 0; i--) {
+ if (new->base_gfn < mslots[i - 1].base_gfn)
+ break;
+
+ WARN_ON_ONCE(new->base_gfn == mslots[i - 1].base_gfn);
+
+ /* Shift the next memslot back one and update its index. */
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ }
+ return i;
+}
+
/*
* Insert memslot and re-sort memslots based on their GFN,
* so binary search could be used to lookup GFN.
* Sorting algorithm takes advantage of having initially
* sorted array and known changed memslot position.
+ *
+ * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
*/
static void update_memslots(struct kvm_memslots *slots,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- int id = new->id;
- int i = slots->id_to_index[id];
- struct kvm_memory_slot *mslots = slots->memslots;
-
- WARN_ON(mslots[i].id != id);
- switch (change) {
- case KVM_MR_CREATE:
- slots->used_slots++;
- WARN_ON(mslots[i].npages || !new->npages);
- break;
- case KVM_MR_DELETE:
- slots->used_slots--;
- WARN_ON(new->npages || !mslots[i].npages);
- break;
- default:
- break;
- }
-
- while (i < KVM_MEM_SLOTS_NUM - 1 &&
- new->base_gfn <= mslots[i + 1].base_gfn) {
- if (!mslots[i + 1].npages)
- break;
- mslots[i] = mslots[i + 1];
- slots->id_to_index[mslots[i].id] = i;
- i++;
+ int i;
+
+ if (change == KVM_MR_DELETE) {
+ kvm_shift_memslots_deleted(slots, new);
+ } else {
+ if (change == KVM_MR_CREATE)
+ i = slots->used_slots++;
+ else
+ i = kvm_shift_memslots_forward(slots, new);
+ i = kvm_shift_memslots_back(slots, new, i);
+
+ slots->memslots[i] = *new;
+ slots->id_to_index[new->id] = i;
}
-
- /*
- * The ">=" is needed when creating a slot with base_gfn == 0,
- * so that it moves before all those with base_gfn == npages == 0.
- *
- * On the other hand, if new->npages is zero, the above loop has
- * already left i pointing to the beginning of the empty part of
- * mslots, and the ">=" would move the hole backwards in this
- * case---which is wrong. So skip the loop when deleting a slot.
- */
- if (new->npages) {
- while (i > 0 &&
- new->base_gfn >= mslots[i - 1].base_gfn) {
- mslots[i] = mslots[i - 1];
- slots->id_to_index[mslots[i].id] = i;
- i--;
- }
- } else
- WARN_ON_ONCE(i != slots->used_slots);
-
- mslots[i] = *new;
- slots->id_to_index[mslots[i].id] = i;
}
static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
@@ -1029,8 +1059,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
* when the memslots are re-sorted by update_memslots().
*/
tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
- old = *tmp;
- tmp = NULL;
+ if (tmp) {
+ old = *tmp;
+ tmp = NULL;
+ } else {
+ memset(&old, 0, sizeof(old));
+ old.id = id;
+ }
if (!mem->memory_size)
return kvm_delete_memslot(kvm, mem, &old, as_id);
@@ -1148,7 +1183,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
slots = __kvm_memslots(kvm, as_id);
*memslot = id_to_memslot(slots, id);
- if (!(*memslot)->dirty_bitmap)
+ if (!(*memslot) || !(*memslot)->dirty_bitmap)
return -ENOENT;
kvm_arch_sync_dirty_log(kvm, *memslot);
@@ -1206,10 +1241,10 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
slots = __kvm_memslots(kvm, as_id);
memslot = id_to_memslot(slots, id);
+ if (!memslot || !memslot->dirty_bitmap)
+ return -ENOENT;
dirty_bitmap = memslot->dirty_bitmap;
- if (!dirty_bitmap)
- return -ENOENT;
kvm_arch_sync_dirty_log(kvm, memslot);
@@ -1317,10 +1352,10 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
slots = __kvm_memslots(kvm, as_id);
memslot = id_to_memslot(slots, id);
+ if (!memslot || !memslot->dirty_bitmap)
+ return -ENOENT;
dirty_bitmap = memslot->dirty_bitmap;
- if (!dirty_bitmap)
- return -ENOENT;
n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
Refactor memslot handling to treat the number of used slots as the de
facto size of the memslot array, e.g. return NULL from id_to_memslot()
when an invalid index is provided instead of relying on npages==0 to
detect an invalid memslot.  Rework the sorting and walking of memslots
in advance of dynamically sizing memslots to aid bisection and debug,
e.g. with luck, a bug in the refactoring will bisect here and/or hit a
WARN instead of randomly corrupting memory.

Alternatively, a global null/invalid memslot could be returned, i.e. so
callers of id_to_memslot() don't have to explicitly check for a NULL
memslot, but that approach runs the risk of introducing
difficult-to-debug issues, e.g. if the global null slot is modified.
Constifying the return from id_to_memslot() to combat such issues is
possible, but would require a massive refactoring of arch specific
code and would still be susceptible to casting shenanigans.

Add function comments to update_memslots() and search_memslots() to
explicitly (and loudly) state how memslots are sorted.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/powerpc/kvm/book3s_hv.c |   2 +-
 arch/x86/kvm/x86.c           |  14 ++--
 include/linux/kvm_host.h     |  18 +++--
 virt/kvm/arm/mmu.c           |   9 ++-
 virt/kvm/kvm_main.c          | 145 ++++++++++++++++++++++-------
 5 files changed, 117 insertions(+), 71 deletions(-)
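[Editor's note: the sketch below is a minimal, user-space illustration
of the two invariants this patch documents, not kernel code.  The
struct layout and the helper names find_gfn() and find_id() are
simplified stand-ins for kvm_memory_slot, search_memslots() and
id_to_memslot(); the slot values in main() are made up for the demo.
It shows memslots[0..used_slots) sorted from highest base_gfn to
lowest, and an unused slot ID mapping to index -1 so lookups return
NULL.]

#include <stdio.h>

typedef unsigned long long gfn_t;

struct slot {
	gfn_t base_gfn;
	unsigned long npages;
	int id;
};

struct slots {
	struct slot memslots[8];
	int id_to_index[8];	/* -1 == slot ID not in use */
	int used_slots;		/* de facto size of memslots[] */
};

/*
 * Binary search over memslots[0..used_slots), sorted from highest
 * base_gfn to lowest per the new IMPORTANT comments.  Returns NULL
 * if no slot covers @gfn.
 */
static struct slot *find_gfn(struct slots *s, gfn_t gfn)
{
	int lo = 0, hi = s->used_slots - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		struct slot *m = &s->memslots[mid];

		if (gfn >= m->base_gfn + m->npages)
			hi = mid - 1;	/* gfn above this slot: go left */
		else if (gfn < m->base_gfn)
			lo = mid + 1;	/* gfn below this slot: go right */
		else
			return m;	/* base_gfn <= gfn < base + npages */
	}
	return NULL;
}

/* Mirrors the reworked id_to_memslot(): index -1 means "no such slot". */
static struct slot *find_id(struct slots *s, int id)
{
	int index = s->id_to_index[id];

	return index < 0 ? NULL : &s->memslots[index];
}

int main(void)
{
	struct slots s = {
		/* Highest GFN first, lowest last. */
		.memslots    = { { 0x400, 0x100, 1 }, { 0x000, 0x080, 0 } },
		.id_to_index = { 1, 0, -1, -1, -1, -1, -1, -1 },
		.used_slots  = 2,
	};

	printf("gfn 0x450 -> id %d\n", find_gfn(&s, 0x450)->id);  /* 1 */
	printf("gfn 0x040 -> id %d\n", find_gfn(&s, 0x040)->id);  /* 0 */
	printf("gfn 0x200 -> %p\n", (void *)find_gfn(&s, 0x200)); /* nil */
	printf("id 3 -> %p\n", (void *)find_id(&s, 3));           /* nil */
	return 0;
}

One plausible reading of the descending order: zero-initialized
(unused) entries have base_gfn == 0 and so naturally sort to the tail,
which is consistent with used_slots delimiting the active prefix of
the array.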