@@ -1045,7 +1045,7 @@ struct kvm_x86_ops {
void (*vm_destroy)(struct kvm *kvm);
/* Create, but do not attach this VCPU */
- struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+ int (*vcpu_create)(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned id);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
@@ -2144,9 +2144,9 @@ static int avic_init_vcpu(struct vcpu_svm *svm)
return ret;
}
-static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int id)
{
- struct kvm_vcpu *vcpu;
struct vcpu_svm *svm;
struct page *page;
struct page *msrpm_pages;
@@ -2154,22 +2154,15 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
struct page *nested_msrpm_pages;
int err;
- BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
- "struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
- svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
- if (!svm) {
- err = -ENOMEM;
- goto out;
- }
- vcpu = &svm->vcpu;
+ BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
+ svm = to_svm(vcpu);
vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.user_fpu) {
printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
err = -ENOMEM;
- goto free_partial_svm;
+ goto out;
}
vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
@@ -2182,7 +2175,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
- goto free_svm;
+ goto free_guest_fpu;
err = -ENOMEM;
page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -2226,7 +2219,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm_init_osvw(vcpu);
- return vcpu;
+ return 0;
free_page4:
__free_page(hsave_page);
@@ -2238,14 +2231,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
__free_page(page);
uninit:
kvm_vcpu_uninit(vcpu);
-free_svm:
+free_guest_fpu:
kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
free_user_fpu:
kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-free_partial_svm:
- kmem_cache_free(kvm_vcpu_cache, svm);
out:
- return ERR_PTR(err);
+ return err;
}
static void svm_clear_current_vmcb(struct vmcb *vmcb)
@@ -2274,7 +2265,6 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
kvm_vcpu_uninit(vcpu);
kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
- kmem_cache_free(kvm_vcpu_cache, svm);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -6675,31 +6675,24 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
kvm_vcpu_uninit(vcpu);
kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
- kmem_cache_free(kvm_vcpu_cache, vmx);
}
-static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int id)
{
- struct kvm_vcpu *vcpu;
struct vcpu_vmx *vmx;
unsigned long *msr_bitmap;
int i, cpu, err;
- BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
- "struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
- vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
- if (!vmx)
- return ERR_PTR(-ENOMEM);
-
- vcpu = &vmx->vcpu;
+ BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
+ vmx = to_vmx(vcpu);
vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.user_fpu) {
printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
err = -ENOMEM;
- goto free_partial_vcpu;
+ goto out;
}
vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
@@ -6820,7 +6813,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->ept_pointer = INVALID_PAGE;
- return vcpu;
+ return 0;
free_vmcs:
free_loaded_vmcs(vmx->loaded_vmcs);
@@ -6833,9 +6826,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
free_user_fpu:
kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-free_partial_vcpu:
- kmem_cache_free(kvm_vcpu_cache, vmx);
- return ERR_PTR(err);
+out:
+ return err;
}
#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
@@ -9091,26 +9091,34 @@ static void fx_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
- void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
-
kvmclock_reset(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
- free_cpumask_var(wbinvd_dirty_mask);
+
+ free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
struct kvm_vcpu *vcpu;
+ int r;
if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
printk_once(KERN_WARNING
"kvm: SMP vm created on host with unstable TSC; "
"guest TSC will not be reliable\n");
- vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
+ if (!vcpu)
+ return ERR_PTR(-ENOMEM);
+ r = kvm_x86_ops->vcpu_create(kvm, vcpu, id);
+ if (r) {
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+ return ERR_PTR(r);
+ }
return vcpu;
}
Move allocation of VMX and SVM vcpus to common x86.  Although the struct
being allocated is technically a VMX/SVM struct, it can be interpreted
directly as a 'struct kvm_vcpu' because of the pre-existing requirement
that 'struct kvm_vcpu' be located at offset zero of the arch/vendor vcpu
struct.

Remove the message from the build-time assertions regarding placement of
the struct, as compatibility with the arch usercopy region is no longer
the only thing that depends on 'struct kvm_vcpu' being at offset zero.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/svm.c              | 28 +++++++++-------------------
 arch/x86/kvm/vmx/vmx.c          | 24 ++++++++----------------
 arch/x86/kvm/x86.c              | 16 ++++++++++++----
 4 files changed, 30 insertions(+), 40 deletions(-)
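
The offset-zero requirement the changelog leans on can be shown with a small,
self-contained sketch; the names below (generic_vcpu, vendor_vcpu,
vendor_create, common_create) are invented for illustration and are not KVM
code.  Common code allocates an object sized for the vendor struct, works with
it through the generic type, and the vendor hook recovers its private view
with a cast that is valid only because the generic struct is the first member:

/*
 * Illustrative sketch only -- not kernel code.  All identifiers here are
 * made up for the example.
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct generic_vcpu {
	int id;
};

struct vendor_vcpu {
	struct generic_vcpu vcpu;	/* must remain the first member */
	unsigned long vendor_state;
};

static_assert(offsetof(struct vendor_vcpu, vcpu) == 0,
	      "generic struct must be at offset 0");

/* Vendor hook: fills in vendor fields of the caller-allocated object. */
static int vendor_create(struct generic_vcpu *vcpu)
{
	/* Valid only because the generic struct sits at offset zero. */
	struct vendor_vcpu *v = (struct vendor_vcpu *)vcpu;

	v->vendor_state = 0;
	return 0;
}

/* Common code owns allocation and error teardown. */
static struct generic_vcpu *common_create(int id)
{
	/* Allocate the larger vendor struct, expose the generic view. */
	struct generic_vcpu *vcpu = calloc(1, sizeof(struct vendor_vcpu));

	if (!vcpu)
		return NULL;

	vcpu->id = id;
	if (vendor_create(vcpu)) {
		free(vcpu);
		return NULL;
	}
	return vcpu;
}

int main(void)
{
	struct generic_vcpu *vcpu = common_create(0);

	free(vcpu);
	return 0;
}

This mirrors the new flow in kvm_arch_vcpu_create(): the object comes from
kvm_vcpu_cache (sized for the vendor struct) and is freed by common code when
the vendor ->vcpu_create() hook fails, which is why the free_partial_svm and
free_partial_vcpu labels disappear from the vendor error paths.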