--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1486,7 +1486,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
}
 
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
@@ -1502,7 +1503,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* Prevent userspace from creating a memory region outside of the IPA
* space addressable by the KVM guest IPA space.
*/
- if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
+ if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
 
mmap_read_lock(current->mm);
@@ -1534,7 +1535,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
if (vma->vm_flags & VM_PFNMAP) {
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
break;
}
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -233,7 +233,8 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
}
 
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -706,11 +706,12 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
}
 
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
- return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
+ return kvmppc_core_prepare_memory_region(kvm, new, mem, change);
}
 
void kvm_arch_commit_memory_region(struct kvm *kvm,
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -5016,7 +5016,8 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11506,12 +11506,13 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
}
 
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
- return kvm_alloc_memslot_metadata(kvm, memslot,
+ return kvm_alloc_memslot_metadata(kvm, new,
mem->memory_size >> PAGE_SHIFT);
return 0;
}
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -832,7 +832,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1571,7 +1571,7 @@ static int kvm_set_memslot(struct kvm *kvm,
kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}
 
- r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
+ r = kvm_arch_prepare_memory_region(kvm, old, new, mem, change);
if (r)
goto out_slots;
 
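
As a sanity check on the new contract, here is a minimal user-space sketch of the reworked hook. The struct kvm, struct kvm_memory_slot, and struct kvm_userspace_memory_region definitions below are pared-down stand-ins (the real ones live in include/linux/kvm_host.h), and the body just mirrors the arm64-style IPA-range check on the incoming "new" slot. It illustrates the signature change only; it is not in-tree code.

/* sketch.c -- toy model of the two-memslot prepare hook; not kernel code */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Same enumerators as the in-tree enum kvm_mr_change. */
enum kvm_mr_change { KVM_MR_CREATE, KVM_MR_DELETE, KVM_MR_MOVE, KVM_MR_FLAGS_ONLY };

struct kvm { uint64_t phys_size; };	/* stand-in: only the IPA limit */
struct kvm_memory_slot { uint64_t base_gfn; uint64_t npages; uint32_t flags; };
struct kvm_userspace_memory_region { uint64_t memory_size; };

/*
 * After this patch the architecture sees both the current slot state ("old")
 * and the state being installed ("new"); validation keys off "new", as in
 * the arm64 hunk above.
 */
static int kvm_arch_prepare_memory_region(struct kvm *kvm,
					  const struct kvm_memory_slot *old,
					  struct kvm_memory_slot *new,
					  const struct kvm_userspace_memory_region *mem,
					  enum kvm_mr_change change)
{
	(void)old; (void)mem; (void)change;	/* unused in this toy check */
	if (new->base_gfn + new->npages > kvm->phys_size >> PAGE_SHIFT)
		return -EFAULT;
	return 0;
}

int main(void)
{
	struct kvm vm = { .phys_size = 1ULL << 40 };	/* pretend 40-bit IPA space */
	struct kvm_memory_slot old = { 0 };		/* no pre-existing state here */
	struct kvm_memory_slot new = { .base_gfn = 0x10000, .npages = 512 };
	struct kvm_userspace_memory_region mem = { .memory_size = 512ULL << PAGE_SHIFT };

	/* Call site now passes both slots, as kvm_set_memslot() does above. */
	printf("prepare: %d\n",
	       kvm_arch_prepare_memory_region(&vm, &old, &new, &mem, KVM_MR_CREATE));
	return 0;
}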