Message ID | 20210701154105.23215-6-jgross@suse.com
State      | New, archived
Series     | x86/kvm: add boot parameters for max vcpu configs
On 01/07/21 17:41, Juergen Gross wrote:
> {
> -        if (!has_vhe())
> +        if (!has_vhe()) {
> +                kfree(kvm->vcpus);
>                  kfree(kvm);
> -        else
> +        } else {
> +                vfree(kvm->vcpus);
>                  vfree(kvm);
> +        }
> }
>
> int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 79138c91f83d..39cbc4b6bffb 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1440,10 +1440,7 @@ static inline void kvm_ops_static_call_update(void)
> }
>
> #define __KVM_HAVE_ARCH_VM_ALLOC
> -static inline struct kvm *kvm_arch_alloc_vm(void)
> -{
> -        return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
> -}
> +struct kvm *kvm_arch_alloc_vm(void);
> void kvm_arch_free_vm(struct kvm *kvm);
>
> #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 3af398ef1fc9..a9b0bb2221ea 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -10741,9 +10741,28 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
>          static_call(kvm_x86_sched_in)(vcpu, cpu);
> }
>
> +struct kvm *kvm_arch_alloc_vm(void)
> +{
> +        struct kvm *kvm;
> +
> +        kvm = __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
> +        if (!kvm)
> +                return NULL;
> +
> +        kvm->vcpus = __vmalloc(KVM_MAX_VCPUS * sizeof(void *),
> +                               GFP_KERNEL_ACCOUNT | __GFP_ZERO);
> +        if (!kvm->vcpus) {
> +                vfree(kvm);
> +                kvm = NULL;
> +        }
> +

Let's keep this cleaner:

1) use kvfree in the common version of kvm_arch_free_vm

2) split __KVM_HAVE_ARCH_VM_ALLOC and __KVM_HAVE_ARCH_VM_FREE (ARM does
not need it once kvfree is used)

3) define a __kvm_arch_free_vm version that is defined even if
!__KVM_HAVE_ARCH_VM_FREE, and which can be used on x86.

Paolo
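[Editor's note: a rough sketch of what the three points above could look like in include/linux/kvm_host.h. This only illustrates the suggestion, not the actual follow-up patch; the __KVM_HAVE_ARCH_VM_FREE macro and __kvm_arch_free_vm() helper names are taken from the comments above, and the surrounding structure is an assumption.]

/*
 * Sketch only: generic free-side helpers after the suggested cleanup.
 * kvfree() copes with both kmalloc()ed and vmalloc()ed memory, so one
 * helper covers the !VHE and VHE ARM cases as well as the generic and
 * x86 allocations discussed in this thread.
 */
static inline void __kvm_arch_free_vm(struct kvm *kvm)
{
        if (kvm)
                kvfree(kvm->vcpus);
        kvfree(kvm);
}

#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        __kvm_arch_free_vm(kvm);
}
#endif

x86 would then presumably define __KVM_HAVE_ARCH_VM_FREE and have its own kvm_arch_free_vm() free hv_pa_pg before calling __kvm_arch_free_vm(), while arm64 could drop its kvm_arch_free_vm() override entirely and keep only the allocation side.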
On 26.07.21 15:40, Paolo Bonzini wrote:
> On 01/07/21 17:41, Juergen Gross wrote:
>> {
>> -        if (!has_vhe())
>> +        if (!has_vhe()) {
>> +                kfree(kvm->vcpus);
>>                  kfree(kvm);
>> -        else
>> +        } else {
>> +                vfree(kvm->vcpus);
>>                  vfree(kvm);
>> +        }
>> }
>> int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 79138c91f83d..39cbc4b6bffb 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -1440,10 +1440,7 @@ static inline void kvm_ops_static_call_update(void)
>> }
>> #define __KVM_HAVE_ARCH_VM_ALLOC
>> -static inline struct kvm *kvm_arch_alloc_vm(void)
>> -{
>> -        return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
>> -}
>> +struct kvm *kvm_arch_alloc_vm(void);
>> void kvm_arch_free_vm(struct kvm *kvm);
>> #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 3af398ef1fc9..a9b0bb2221ea 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -10741,9 +10741,28 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
>>          static_call(kvm_x86_sched_in)(vcpu, cpu);
>> }
>> +struct kvm *kvm_arch_alloc_vm(void)
>> +{
>> +        struct kvm *kvm;
>> +
>> +        kvm = __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
>> +        if (!kvm)
>> +                return NULL;
>> +
>> +        kvm->vcpus = __vmalloc(KVM_MAX_VCPUS * sizeof(void *),
>> +                               GFP_KERNEL_ACCOUNT | __GFP_ZERO);
>> +        if (!kvm->vcpus) {
>> +                vfree(kvm);
>> +                kvm = NULL;
>> +        }
>> +
>
> Let's keep this cleaner:
>
> 1) use kvfree in the common version of kvm_arch_free_vm
>
> 2) split __KVM_HAVE_ARCH_VM_ALLOC and __KVM_HAVE_ARCH_VM_FREE (ARM does
> not need it once kvfree is used)
>
> 3) define a __kvm_arch_free_vm version that is defined even if
> !__KVM_HAVE_ARCH_VM_FREE, and which can be used on x86.

Okay, will do so.


Juergen
On 2021-07-26 14:46, Juergen Gross wrote:
> On 26.07.21 15:40, Paolo Bonzini wrote:
>> On 01/07/21 17:41, Juergen Gross wrote:
>>> {
>>> -        if (!has_vhe())
>>> +        if (!has_vhe()) {
>>> +                kfree(kvm->vcpus);
>>>                  kfree(kvm);
>>> -        else
>>> +        } else {
>>> +                vfree(kvm->vcpus);
>>>                  vfree(kvm);
>>> +        }
>>> }
>>> int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
>>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>>> index 79138c91f83d..39cbc4b6bffb 100644
>>> --- a/arch/x86/include/asm/kvm_host.h
>>> +++ b/arch/x86/include/asm/kvm_host.h
>>> @@ -1440,10 +1440,7 @@ static inline void kvm_ops_static_call_update(void)
>>> }
>>> #define __KVM_HAVE_ARCH_VM_ALLOC
>>> -static inline struct kvm *kvm_arch_alloc_vm(void)
>>> -{
>>> -        return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
>>> -}
>>> +struct kvm *kvm_arch_alloc_vm(void);
>>> void kvm_arch_free_vm(struct kvm *kvm);
>>> #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
>>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>>> index 3af398ef1fc9..a9b0bb2221ea 100644
>>> --- a/arch/x86/kvm/x86.c
>>> +++ b/arch/x86/kvm/x86.c
>>> @@ -10741,9 +10741,28 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
>>>          static_call(kvm_x86_sched_in)(vcpu, cpu);
>>> }
>>> +struct kvm *kvm_arch_alloc_vm(void)
>>> +{
>>> +        struct kvm *kvm;
>>> +
>>> +        kvm = __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
>>> +        if (!kvm)
>>> +                return NULL;
>>> +
>>> +        kvm->vcpus = __vmalloc(KVM_MAX_VCPUS * sizeof(void *),
>>> +                               GFP_KERNEL_ACCOUNT | __GFP_ZERO);
>>> +        if (!kvm->vcpus) {
>>> +                vfree(kvm);
>>> +                kvm = NULL;
>>> +        }
>>> +
>>
>> Let's keep this cleaner:
>>
>> 1) use kvfree in the common version of kvm_arch_free_vm
>>
>> 2) split __KVM_HAVE_ARCH_VM_ALLOC and __KVM_HAVE_ARCH_VM_FREE (ARM
>> does not need it once kvfree is used)
>>
>> 3) define a __kvm_arch_free_vm version that is defined even if
>> !__KVM_HAVE_ARCH_VM_FREE, and which can be used on x86.
>
> Okay, will do so.

I'd appreciate if you could Cc me on the whole series, and not just
the single arm64 patch.

Thanks,

        M.
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e720148232a0..4f055408fe9f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -280,18 +280,38 @@ long kvm_arch_dev_ioctl(struct file *filp,
 struct kvm *kvm_arch_alloc_vm(void)
 {
+        struct kvm *kvm;
+
         if (!has_vhe())
-                return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+                kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+        else
+                kvm = vzalloc(sizeof(struct kvm));
 
-        return vzalloc(sizeof(struct kvm));
+        if (!kvm)
+                return NULL;
+
+        if (!has_vhe())
+                kvm->vcpus = kcalloc(KVM_MAX_VCPUS, sizeof(void *), GFP_KERNEL);
+        else
+                kvm->vcpus = vzalloc(KVM_MAX_VCPUS * sizeof(void *));
+
+        if (!kvm->vcpus) {
+                kvm_arch_free_vm(kvm);
+                kvm = NULL;
+        }
+
+        return kvm;
 }
 
 void kvm_arch_free_vm(struct kvm *kvm)
 {
-        if (!has_vhe())
+        if (!has_vhe()) {
+                kfree(kvm->vcpus);
                 kfree(kvm);
-        else
+        } else {
+                vfree(kvm->vcpus);
                 vfree(kvm);
+        }
 }
 
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 79138c91f83d..39cbc4b6bffb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1440,10 +1440,7 @@ static inline void kvm_ops_static_call_update(void)
 }
 
 #define __KVM_HAVE_ARCH_VM_ALLOC
-static inline struct kvm *kvm_arch_alloc_vm(void)
-{
-        return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
-}
+struct kvm *kvm_arch_alloc_vm(void);
 void kvm_arch_free_vm(struct kvm *kvm);
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3af398ef1fc9..a9b0bb2221ea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10741,9 +10741,28 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
         static_call(kvm_x86_sched_in)(vcpu, cpu);
 }
 
+struct kvm *kvm_arch_alloc_vm(void)
+{
+        struct kvm *kvm;
+
+        kvm = __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+        if (!kvm)
+                return NULL;
+
+        kvm->vcpus = __vmalloc(KVM_MAX_VCPUS * sizeof(void *),
+                               GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+        if (!kvm->vcpus) {
+                vfree(kvm);
+                kvm = NULL;
+        }
+
+        return kvm;
+}
+
 void kvm_arch_free_vm(struct kvm *kvm)
 {
         kfree(to_kvm_hv(kvm)->hv_pa_pg);
+        vfree(kvm->vcpus);
         vfree(kvm);
 }
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8583ed3ff344..e424ef1078a1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -525,7 +525,7 @@ struct kvm {
         struct mutex slots_lock;
         struct mm_struct *mm; /* userspace tied to this vm */
         struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
-        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+        struct kvm_vcpu **vcpus;
 
         /*
          * created_vcpus is protected by kvm->lock, and is incremented
@@ -1022,11 +1022,24 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm);
  */
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+
+        if (!kvm)
+                return NULL;
+
+        kvm->vcpus = kcalloc(KVM_MAX_VCPUS, sizeof(void *), GFP_KERNEL);
+        if (!kvm->vcpus) {
+                kfree(kvm);
+                kvm = NULL;
+        }
+
+        return kvm;
 }
 
 static inline void kvm_arch_free_vm(struct kvm *kvm)
 {
+        if (kvm)
+                kfree(kvm->vcpus);
         kfree(kvm);
 }
 #endif
Prepare support of very large vcpu numbers per guest by moving the vcpu
pointer array out of struct kvm.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/arm64/kvm/arm.c            | 28 ++++++++++++++++++++++++----
 arch/x86/include/asm/kvm_host.h |  5 +----
 arch/x86/kvm/x86.c              | 19 +++++++++++++++++++
 include/linux/kvm_host.h        | 17 +++++++++++++++--
 4 files changed, 59 insertions(+), 10 deletions(-)
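[Editor's note: a sketch of where this is heading, given the series topic of boot parameters for max vcpu configs. Once the pointer array is allocated separately, its size could come from a run-time limit rather than the compile-time KVM_MAX_VCPUS constant; the kvm_max_vcpus variable below is hypothetical and only illustrates the idea, it is not part of this patch.]

/* Hypothetical follow-up: size the vcpu pointer array from an assumed
 * boot-time parameter instead of the compile-time maximum.
 */
static unsigned int kvm_max_vcpus = KVM_MAX_VCPUS;

static inline struct kvm *kvm_arch_alloc_vm(void)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

        if (!kvm)
                return NULL;

        /* Array length now depends on the run-time limit. */
        kvm->vcpus = kcalloc(kvm_max_vcpus, sizeof(void *), GFP_KERNEL);
        if (!kvm->vcpus) {
                kfree(kvm);
                return NULL;
        }

        return kvm;
}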