@@ -1553,7 +1553,7 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to,
static int kvm_set_memslot(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *new, int as_id,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
struct kvm_memory_slot *slot, old;
@@ -1576,7 +1576,7 @@ static int kvm_set_memslot(struct kvm *kvm,
*/
mutex_lock(&kvm->slots_arch_lock);
- slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
+ slots = kvm_dup_memslots(__kvm_memslots(kvm, new->as_id), change);
if (!slots) {
mutex_unlock(&kvm->slots_arch_lock);
return -ENOMEM;
@@ -1596,7 +1596,7 @@ static int kvm_set_memslot(struct kvm *kvm,
* dropped by update_memslots anyway. We'll also revert to the
* old memslots if preparing the new memory region fails.
*/
- slots = install_new_memslots(kvm, as_id, slots);
+ slots = install_new_memslots(kvm, new->as_id, slots);
/* From this point no new shadow pages pointing to a deleted,
* or moved, memslot will be created.
@@ -1618,7 +1618,7 @@ static int kvm_set_memslot(struct kvm *kvm,
* to retrieve memslots *after* acquiring slots_arch_lock, thus
* the active memslots are guaranteed to be fresh.
*/
- kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, as_id));
+ kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, new->as_id));
}
/*
@@ -1635,7 +1635,7 @@ static int kvm_set_memslot(struct kvm *kvm,
WARN_ON_ONCE(change != KVM_MR_CREATE);
memset(&old, 0, sizeof(old));
old.id = new->id;
- old.as_id = as_id;
+ old.as_id = new->as_id;
}
/* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
@@ -1646,7 +1646,7 @@ static int kvm_set_memslot(struct kvm *kvm,
goto out_slots;
update_memslots(slots, new, change);
- slots = install_new_memslots(kvm, as_id, slots);
+ slots = install_new_memslots(kvm, new->as_id, slots);
/*
* Update the total number of memslot pages before calling the arch
@@ -1668,7 +1668,7 @@ static int kvm_set_memslot(struct kvm *kvm,
out_slots:
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
- slots = install_new_memslots(kvm, as_id, slots);
+ slots = install_new_memslots(kvm, new->as_id, slots);
else
mutex_unlock(&kvm->slots_arch_lock);
kvfree(slots);
@@ -1740,13 +1740,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(&new, 0, sizeof(new));
new.id = id;
- /*
- * This is only for debugging purpose; it should never be
- * referenced for a removed memslot.
- */
new.as_id = as_id;
- return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
+ return kvm_set_memslot(kvm, mem, &new, KVM_MR_DELETE);
}
new.as_id = as_id;
@@ -1809,7 +1805,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
bitmap_set(new.dirty_bitmap, 0, new.npages);
}
- r = kvm_set_memslot(kvm, mem, &new, as_id, change);
+ r = kvm_set_memslot(kvm, mem, &new, change);
if (r)
goto out_bitmap;
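
The net effect of the hunks above is that kvm_set_memslot() no longer takes a separate as_id argument: the address space ID is read from new->as_id, which every caller already fills in. Below is a minimal, self-contained user-space sketch of that calling convention; the struct memslot, struct vm, and set_memslot() names, and the one-slot-per-address-space model, are invented for illustration and are not the kernel's API. It only shows how the address space index can be derived from the slot itself, the way __kvm_memslots(kvm, new->as_id) does in the patch.

#include <stdio.h>

#define NR_ADDRESS_SPACES 2   /* e.g. regular memory vs. a second address space */

/* Simplified stand-ins for the kernel structures; all names here are
 * illustrative only. */
struct memslot {
	int id;
	int as_id;              /* which address space the slot belongs to */
	unsigned long npages;
};

struct vm {
	/* one slot per address space, to keep the model tiny */
	struct memslot slots[NR_ADDRESS_SPACES];
};

/* The address space is taken from new->as_id, so the function needs no
 * separate as_id parameter and callers cannot pass a value that
 * disagrees with the slot. */
static int set_memslot(struct vm *vm, const struct memslot *new)
{
	if (new->as_id < 0 || new->as_id >= NR_ADDRESS_SPACES)
		return -1;

	vm->slots[new->as_id] = *new;   /* install into the right address space */
	return 0;
}

int main(void)
{
	struct vm vm = { 0 };
	struct memslot new = { .id = 3, .as_id = 1, .npages = 512 };

	if (!set_memslot(&vm, &new))
		printf("slot %d installed in address space %d (%lu pages)\n",
		       vm.slots[1].id, vm.slots[1].as_id, vm.slots[1].npages);
	return 0;
}

The removal of the "only for debugging purpose" comment in __kvm_set_memory_region() follows from the same change: on the deletion path, new.as_id is now the only way the address space ID reaches kvm_set_memslot(), so it is load-bearing rather than informational.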