@@ -11572,13 +11572,13 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- struct kvm_userspace_memory_region m;
+ struct kvm_userspace_memory_region_ext m;
- m.slot = id | (i << 16);
- m.flags = 0;
- m.guest_phys_addr = gpa;
- m.userspace_addr = hva;
- m.memory_size = size;
+ m.region.slot = id | (i << 16);
+ m.region.flags = 0;
+ m.region.guest_phys_addr = gpa;
+ m.region.userspace_addr = hva;
+ m.region.memory_size = size;
r = __kvm_set_memory_region(kvm, &m);
if (r < 0)
return ERR_PTR_USR(r);
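
The extended type itself is not shown in this excerpt. A minimal sketch of the layout it implies, inferred only from the fields this patch touches (the embedded classic region, private_offset, and the KVM_MEM_PRIVATE flag handled in the ioctl hunk below); private_fd and the padding are assumptions, not confirmed by this diff:

	struct kvm_userspace_memory_region_ext {
		struct kvm_userspace_memory_region region; /* classic fields come first */
		__u64 private_offset; /* start of the extended tail copied below */
		__u32 private_fd;     /* assumed: fd backing the private pages */
		__u32 pad1;           /* assumed padding for future growth */
		__u64 pad2[14];
	};

The two-step copy in the ioctl hunk below relies on region sitting first and private_offset opening the extension, so the classic and extended parts can be copied from userspace separately.
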
@@ -977,9 +977,9 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region_ext *region_ext);
int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region_ext *region_ext);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1815,8 +1815,9 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
* Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region_ext *region_ext)
{
+ const struct kvm_userspace_memory_region *mem = &region_ext->region;
struct kvm_memory_slot *old, *new;
struct kvm_memslots *slots;
enum kvm_mr_change change;
@@ -1919,24 +1920,24 @@ int __kvm_set_memory_region(struct kvm *kvm,
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region_ext *region_ext)
{
int r;
mutex_lock(&kvm->slots_lock);
- r = __kvm_set_memory_region(kvm, mem);
+ r = __kvm_set_memory_region(kvm, region_ext);
mutex_unlock(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region_ext *region_ext)
{
- if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
+ if ((u16)region_ext->region.slot >= KVM_USER_MEM_SLOTS)
return -EINVAL;
- return kvm_set_memory_region(kvm, mem);
+ return kvm_set_memory_region(kvm, region_ext);
}
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
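
The (u16) cast in kvm_vm_ioctl_set_memory_region() is deliberate: as the first hunk shows with id | (i << 16), the upper 16 bits of slot carry the address-space index, so only the low half is the user slot number. A small illustration of that encoding, with hypothetical helper names:

	/* Hypothetical helpers mirroring the slot encoding used above. */
	static inline u32 pack_slot(u16 as_id, u16 slot)
	{
		return (u32)slot | ((u32)as_id << 16); /* mirrors id | (i << 16) */
	}

	static inline u16 user_slot(u32 packed)
	{
		return (u16)packed; /* mirrors the (u16)...slot cast above */
	}
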
@@ -4482,14 +4483,23 @@ static long kvm_vm_ioctl(struct file *filp,
break;
}
case KVM_SET_USER_MEMORY_REGION: {
- struct kvm_userspace_memory_region kvm_userspace_mem;
+ struct kvm_userspace_memory_region_ext region_ext;
r = -EFAULT;
- if (copy_from_user(&kvm_userspace_mem, argp,
- sizeof(kvm_userspace_mem)))
+ if (copy_from_user(&region_ext, argp,
+ sizeof(struct kvm_userspace_memory_region)))
goto out;
+ if (region_ext.region.flags & KVM_MEM_PRIVATE) {
+ int offset = offsetof(
+ struct kvm_userspace_memory_region_ext,
+ private_offset);
+ if (copy_from_user(&region_ext.private_offset,
+ argp + offset,
+ sizeof(region_ext) - offset))
+ goto out;
+ }
- r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
+ r = kvm_vm_ioctl_set_memory_region(kvm, &region_ext);
break;
}
case KVM_GET_DIRTY_LOG: {
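
For callers, the two-step copy_from_user() above means userspace that never sets KVM_MEM_PRIVATE keeps passing the classic struct unchanged; the kernel reads the extended tail only when the flag is present. A hedged userspace sketch, assuming a uapi header that exports the extended struct and the private_fd member from the sketch near the top (neither is confirmed by this diff):

	#include <err.h>
	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* vm_fd comes from KVM_CREATE_VM; shared_mem is an mmap'ed host buffer;
	 * private_fd is the assumed fd (e.g. a memfd) backing the private pages. */
	static void set_private_region(int vm_fd, void *shared_mem, int private_fd)
	{
		struct kvm_userspace_memory_region_ext ext = {
			.region = {
				.slot            = 0,
				.flags           = KVM_MEM_PRIVATE, /* makes KVM read the tail */
				.guest_phys_addr = 0x100000,
				.memory_size     = 0x200000,
				.userspace_addr  = (__u64)(unsigned long)shared_mem,
			},
			.private_offset = 0,          /* offset into private_fd */
			.private_fd     = private_fd, /* assumed member, see sketch above */
		};

		if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &ext) < 0)
			err(1, "KVM_SET_USER_MEMORY_REGION");
	}
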