diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -280,6 +280,11 @@ static inline void dump_handler(const char *symbol, void *start, void *end)
pr_debug("\tEND(%s)\n", symbol);
}
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ return 0;
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
int err, size;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -716,6 +716,11 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvmppc_core_flush_memslot(kvm, slot);
}
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ return 0;
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvm_vcpu *vcpu;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3038,15 +3038,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return rc;
}
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
+ return -EINVAL;
+ return 0;
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
struct kvm_vcpu *vcpu;
struct sie_page *sie_page;
- int rc = -EINVAL;
-
- if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
- goto out;
+ int rc;
rc = -ENOMEM;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9010,17 +9010,21 @@ static void fx_init(struct kvm_vcpu *vcpu)
vcpu->arch.cr0 |= X86_CR0_ET;
}
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
+ pr_warn_once("kvm: SMP vm created on host with unstable TSC; "
+ "guest TSC will not be reliable\n");
+
+ return 0;
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
struct kvm_vcpu *vcpu;
int r;
- if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
- printk_once(KERN_WARNING
- "kvm: SMP vm created on host with unstable TSC; "
- "guest TSC will not be reliable\n");
-
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
if (!vcpu)
return ERR_PTR(-ENOMEM);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -855,6 +855,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -253,21 +253,22 @@ void kvm_arch_free_vm(struct kvm *kvm)
vfree(kvm);
}
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+ return -EBUSY;
+
+ if (id >= kvm->arch.max_vcpus)
+ return -EINVAL;
+
+ return 0;
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
int err;
struct kvm_vcpu *vcpu;
- if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
- err = -EBUSY;
- goto out;
- }
-
- if (id >= kvm->arch.max_vcpus) {
- err = -EINVAL;
- goto out;
- }
-
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2653,6 +2653,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
kvm->created_vcpus++;
mutex_unlock(&kvm->lock);
+ r = kvm_arch_vcpu_precreate(kvm, id);
+ if (r)
+ goto vcpu_decrement;
+
vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu)) {
r = PTR_ERR(vcpu);
Add a pre-allocation arch hook to handle checks that are currently done
by arch-specific code prior to allocating the vCPU object.  This paves
the way for moving the allocation to common KVM code.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/mips/kvm/mips.c       |  5 +++++
 arch/powerpc/kvm/powerpc.c |  5 +++++
 arch/s390/kvm/kvm-s390.c   | 12 ++++++++----
 arch/x86/kvm/x86.c         | 14 +++++++++-----
 include/linux/kvm_host.h   |  1 +
 virt/kvm/arm/arm.c         | 21 +++++++++++----------
 virt/kvm/kvm_main.c        |  4 ++++
 7 files changed, 43 insertions(+), 19 deletions(-)
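
For quick reference, here is a minimal sketch of how the new hook slots into the
common vCPU-creation path once this patch is applied. It condenses the
virt/kvm/kvm_main.c hunk above into a standalone helper: the created_vcpus
accounting, the vcpu_decrement unwind label, and the fd/debugfs plumbing are
deliberately omitted, and create_vcpu_sketch() is an illustrative name, not a
function added by this patch.

/*
 * Condensed illustration of the post-patch flow in kvm_vm_ioctl_create_vcpu();
 * locking, created_vcpus accounting and error unwinding are elided.
 */
static int create_vcpu_sketch(struct kvm *kvm, u32 id)
{
	struct kvm_vcpu *vcpu;
	int r;

	/* Arch code can now reject the vCPU before anything is allocated. */
	r = kvm_arch_vcpu_precreate(kvm, id);
	if (r)
		return r;

	/* Allocation still lives in arch code; a later patch can hoist it here. */
	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	return kvm_arch_vcpu_setup(vcpu);
}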