From patchwork Mon Jun  8 09:32:57 2009
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Gleb Natapov <gleb@redhat.com>
X-Patchwork-Id: 28603
From: Gleb Natapov <gleb@redhat.com>
To: avi@redhat.com
Cc: kvm@vger.kernel.org
Subject: [PATCH 3/4] Break dependency between vcpu index in vcpus array and vcpu_id.
Date: Mon,  8 Jun 2009 12:32:57 +0300
Message-Id: <1244453578-29857-4-git-send-email-gleb@redhat.com>
In-Reply-To: <1244453578-29857-1-git-send-email-gleb@redhat.com>
References: <1244453578-29857-1-git-send-email-gleb@redhat.com>
Sender: kvm-owner@vger.kernel.org
X-Mailing-List: kvm@vger.kernel.org

Archs are free to use vcpu_id as they see fit.  For x86 it is used as
the vcpu's apic id.  A new ioctl is added to configure the boot vcpu id,
which until now was assumed to be 0.
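As an illustration (not part of this patch), a minimal userspace sketch
of how a VMM could use the new ioctl; the /dev/kvm open and
KVM_CREATE_VM sequence is the standard one, and the error handling is
illustrative only.  KVM_SET_BOOT_CPU_ID must be issued before any vcpu
exists, otherwise the kernel returns -EBUSY:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

		/* Must run before any KVM_CREATE_VCPU, or the kernel
		 * returns -EBUSY (see kvm_vm_ioctl() below). */
		if (ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, 1) < 0)
			perror("KVM_SET_BOOT_CPU_ID");

		/* The vcpu whose id matches bsp_vcpu_id becomes the BSP;
		 * any other id produces a regular AP. */
		int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 1);
		return vcpu_fd < 0;
	}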
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 include/linux/kvm.h      |    2 +
 include/linux/kvm_host.h |    2 +
 virt/kvm/kvm_main.c      |   58 ++++++++++++++++++++++++++-------------------
 3 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 29b62cc..c5109a4 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -430,6 +430,7 @@ struct kvm_trace_rec {
 #ifdef __KVM_HAVE_PIT
 #define KVM_CAP_PIT2 33
 #endif
+#define KVM_CAP_SET_BOOT_CPU_ID 34
 
 #ifdef KVM_CAP_IRQ_ROUTING
@@ -535,6 +536,7 @@ struct kvm_irqfd {
 #define KVM_DEASSIGN_DEV_IRQ      _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
 #define KVM_IRQFD                 _IOW(KVMIO, 0x76, struct kvm_irqfd)
 #define KVM_CREATE_PIT2           _IOW(KVMIO, 0x77, struct kvm_pit_config)
+#define KVM_SET_BOOT_CPU_ID       _IO(KVMIO, 0x78)
 
 /*
  * ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4d8e222..3be6768 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -130,8 +130,10 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9a415c5..52d01db 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -687,11 +687,6 @@ out:
 }
 #endif
 
-static inline int valid_vcpu(int n)
-{
-	return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -1699,24 +1694,18 @@ static struct file_operations kvm_vcpu_fops = {
  */
 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 {
-	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
-	if (fd < 0)
-		kvm_put_kvm(vcpu->kvm);
-	return fd;
+	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
 }
 
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
 	struct kvm_vcpu *vcpu;
 
-	if (!valid_vcpu(n))
-		return -EINVAL;
-
-	vcpu = kvm_arch_vcpu_create(kvm, n);
+	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);
 
@@ -1727,25 +1716,36 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		return r;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->vcpus[n]) {
-		r = -EEXIST;
+	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+		r = -EINVAL;
 		goto vcpu_destroy;
 	}
-	kvm->vcpus[n] = vcpu;
-	if (n == 0)
-		kvm->bsp_vcpu = vcpu;
-	mutex_unlock(&kvm->lock);
+
+	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
+		if (kvm->vcpus[r]->vcpu_id == id) {
+			r = -EEXIST;
+			goto vcpu_destroy;
+		}
+
+	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
 
 	/* Now it's all set up, let userspace reach it */
 	kvm_get_kvm(kvm);
 	r = create_vcpu_fd(vcpu);
-	if (r < 0)
-		goto unlink;
+	if (r < 0) {
+		kvm_put_kvm(kvm);
+		goto vcpu_destroy;
+	}
+
+	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+	smp_wmb();
+	atomic_inc(&kvm->online_vcpus);
+
+	if (kvm->bsp_vcpu_id == id)
+		kvm->bsp_vcpu = vcpu;
+	mutex_unlock(&kvm->lock);
 	return r;
 
-unlink:
-	mutex_lock(&kvm->lock);
-	kvm->vcpus[n] = NULL;
 vcpu_destroy:
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_destroy(vcpu);
@@ -2218,6 +2218,13 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
 		break;
 	}
+	case KVM_SET_BOOT_CPU_ID:
+		r = 0;
+		if (atomic_read(&kvm->online_vcpus) != 0)
+			r = -EBUSY;
+		else
+			kvm->bsp_vcpu_id = arg;
+		break;
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
@@ -2284,6 +2291,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+	case KVM_CAP_SET_BOOT_CPU_ID:
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
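
A note on the publish order in kvm_vm_ioctl_create_vcpu(): the vcpu
pointer is stored first, then smp_wmb(), then online_vcpus is
incremented, so a lockless reader that checks online_vcpus before
dereferencing vcpus[i] always sees a fully initialized entry.  A sketch
of the matching reader side (the helper name is illustrative; this
patch itself does not add it):

	/* Illustrative accessor pairing with the smp_wmb() in
	 * kvm_vm_ioctl_create_vcpu(): a reader that observes
	 * i < online_vcpus is guaranteed to see vcpus[i] set. */
	static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
	{
		if (i >= atomic_read(&kvm->online_vcpus))
			return NULL;
		smp_rmb();
		return kvm->vcpus[i];
	}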