@@ -230,8 +230,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
return 0;
}
@@ -1560,8 +1560,8 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
unsigned long i;
unsigned long pfn;
@@ -412,8 +412,8 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}
@@ -974,8 +974,8 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
@@ -6906,23 +6906,21 @@ out_free:
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
- int npages = memslot->npages;
-
/*
* Only private memory slots need to be mapped here since
* KVM_SET_MEMORY_REGION ioctl is no longer supported.
*/
- if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+ if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
unsigned long userspace_addr;
/*
* MAP_SHARED to prevent internal slot pages from being moved
* by fork()/COW.
*/
- userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+ userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0);
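
[Note, not part of the patch: the check above is the first place this patch compares against the new parameter. For reference, a sketch of the kvm_mr_change values it tests, which were introduced earlier in this series in include/linux/kvm_host.h; the comments here are mine, not the kernel's.]

enum kvm_mr_change {
	KVM_MR_CREATE,		/* a slot that did not exist is being added */
	KVM_MR_DELETE,		/* an existing slot is being removed */
	KVM_MR_MOVE,		/* an existing slot moves to a different base gfn */
	KVM_MR_FLAGS_ONLY,	/* only the slot's flags change */
};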
@@ -479,8 +479,8 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem);
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old);
@@ -856,7 +856,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
slots = old_memslots;
}
- r = kvm_arch_prepare_memory_region(kvm, &new, old, mem);
+ r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
if (r)
goto out_slots;
This patch drops the parameter old, a copy of the old memory slot, and adds
a new parameter named change to know the change being requested.

This not only cleans up the code but also removes extra copying of the
memory slot structure.

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
 arch/arm/kvm/arm.c         |  4 ++--
 arch/ia64/kvm/kvm-ia64.c   |  4 ++--
 arch/powerpc/kvm/powerpc.c |  4 ++--
 arch/s390/kvm/kvm-s390.c   |  4 ++--
 arch/x86/kvm/x86.c         | 10 ++++------
 include/linux/kvm_host.h   |  4 ++--
 virt/kvm/kvm_main.c        |  2 +-
 7 files changed, 15 insertions(+), 17 deletions(-)
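
[Note, not part of the patch: a simplified, stand-alone sketch of how the generic code can classify a KVM_SET_USER_MEMORY_REGION request once, before handing it to kvm_arch_prepare_memory_region(). The names classify_change and slot_view are made up for illustration; the real logic lives in __kvm_set_memory_region() and reuses the kvm_mr_change values sketched above.]

#include <stdbool.h>

struct slot_view {		/* only the fields the classification needs */
	unsigned long base_gfn;
	unsigned long npages;
	unsigned int flags;
};

/* Returns false if the request is a no-op or touches a non-existent slot. */
static bool classify_change(const struct slot_view *old,
			    const struct slot_view *new,
			    enum kvm_mr_change *change)
{
	if (new->npages) {
		if (!old->npages)
			*change = KVM_MR_CREATE;	/* brand new slot */
		else if (new->base_gfn != old->base_gfn)
			*change = KVM_MR_MOVE;		/* same slot, new guest address */
		else if (new->flags != old->flags)
			*change = KVM_MR_FLAGS_ONLY;	/* e.g. dirty logging toggled */
		else
			return false;			/* nothing to change */
	} else if (old->npages) {
		*change = KVM_MR_DELETE;		/* zero npages deletes the slot */
	} else {
		return false;				/* modifying a non-existent slot */
	}
	return true;
}

With the classification done once in generic code, each arch callback can test change directly instead of re-deriving the same information from the old and new slot contents, which is exactly what the x86 hunk above takes advantage of.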