[RFC,v3,3/4] Break dependency between vcpu index in vcpus array and vcpu_id.

Message ID 1243504487-25796-4-git-send-email-gleb@redhat.com (mailing list archive)
State New, archived

Commit Message

Gleb Natapov May 28, 2009, 9:54 a.m. UTC
Archs are free to use vcpu_id as they see fit. For x86 it is used as
the vcpu's apic id. A new ioctl is added to configure the boot vcpu id,
which was assumed to be 0 until now.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 include/linux/kvm.h      |    2 +
 include/linux/kvm_host.h |    2 +
 virt/kvm/kvm_main.c      |   50 ++++++++++++++++++++++++++-------------------
 3 files changed, 33 insertions(+), 21 deletions(-)
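
For context, a minimal userspace sketch of how the new ioctl is meant to
be used (a hypothetical example, not part of the patch; most error
handling elided). Since KVM_SET_BOOT_CPU_ID is declared _IO, the vcpu id
is passed directly as the ioctl argument:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd, vm_fd, vcpu_fd;

	kvm_fd = open("/dev/kvm", O_RDWR);
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	/* Must be issued before the boot vcpu is created. */
	if (ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, 1) < 0)
		perror("KVM_SET_BOOT_CPU_ID");

	/* vcpu_id 1 is now treated as the BSP instead of the default 0. */
	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 1);

	return vcpu_fd < 0;
}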

Comments

Marcelo Tosatti June 2, 2009, 12:38 p.m. UTC | #1
On Thu, May 28, 2009 at 12:54:46PM +0300, Gleb Natapov wrote:
> Archs are free to use vcpu_id as they see fit. For x86 it is used as
> the vcpu's apic id. A new ioctl is added to configure the boot vcpu id,
> which was assumed to be 0 until now.
> 
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> ---
>  include/linux/kvm.h      |    2 +
>  include/linux/kvm_host.h |    2 +
>  virt/kvm/kvm_main.c      |   50 ++++++++++++++++++++++++++-------------------
>  3 files changed, 33 insertions(+), 21 deletions(-)
> 
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 632a856..d10ab5d 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -430,6 +430,7 @@ struct kvm_trace_rec {
>  #ifdef __KVM_HAVE_PIT
>  #define KVM_CAP_PIT2 33
>  #endif
> +#define KVM_CAP_SET_BOOT_CPU_ID 34
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> @@ -537,6 +538,7 @@ struct kvm_irqfd {
>  #define KVM_DEASSIGN_DEV_IRQ       _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
>  #define KVM_IRQFD                  _IOW(KVMIO, 0x76, struct kvm_irqfd)
>  #define KVM_CREATE_PIT2		   _IOW(KVMIO, 0x77, struct kvm_pit_config)
> +#define KVM_SET_BOOT_CPU_ID        _IO(KVMIO, 0x78)
>  
>  /*
>   * ioctls for vcpu fds
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index e9e0cd8..e368a14 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -130,8 +130,10 @@ struct kvm {
>  	int nmemslots;
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
> +	u32 bsp_vcpu_id;
>  	struct kvm_vcpu *bsp_vcpu;
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> +	atomic_t online_vcpus;
>  	struct list_head vm_list;
>  	struct kvm_io_bus mmio_bus;
>  	struct kvm_io_bus pio_bus;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 5a55fe0..d65c637 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -693,11 +693,6 @@ out:
>  }
>  #endif
>  
> -static inline int valid_vcpu(int n)
> -{
> -	return likely(n >= 0 && n < KVM_MAX_VCPUS);
> -}
> -
>  inline int kvm_is_mmio_pfn(pfn_t pfn)
>  {
>  	if (pfn_valid(pfn)) {
> @@ -1713,15 +1708,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
>  /*
>   * Creates some virtual cpus.  Good luck creating more than one.
>   */
> -static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
> +static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
>  {
>  	int r;
>  	struct kvm_vcpu *vcpu;
>  
> -	if (!valid_vcpu(n))
> -		return -EINVAL;
> -
> -	vcpu = kvm_arch_vcpu_create(kvm, n);
> +	vcpu = kvm_arch_vcpu_create(kvm, id);
>  	if (IS_ERR(vcpu))
>  		return PTR_ERR(vcpu);
>  
> @@ -1732,25 +1724,36 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		return r;
>  
>  	mutex_lock(&kvm->lock);
> -	if (kvm->vcpus[n]) {
> -		r = -EEXIST;
> +	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
> +		r = -EINVAL;
>  		goto vcpu_destroy;
>  	}
> -	kvm->vcpus[n] = vcpu;
> -	if (n == 0)
> -		kvm->bsp_vcpu = vcpu;
> -	mutex_unlock(&kvm->lock);
> +
> +	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
> +		if (kvm->vcpus[r]->vcpu_id == id) {
> +			r = -EEXIST;
> +			goto vcpu_destroy;
> +		}
> +
> +	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
>  
>  	/* Now it's all set up, let userspace reach it */
>  	kvm_get_kvm(kvm);
>  	r = create_vcpu_fd(vcpu);
> -	if (r < 0)
> -		goto unlink;
> +	if (r < 0) {
> +		kvm_put_kvm(kvm);
> +		goto vcpu_destroy;
> +	}
> +
> +	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
> +	smp_wmb();
> +	atomic_inc(&kvm->online_vcpus);
> +
> +	if (kvm->bsp_vcpu_id == id)
> +		kvm->bsp_vcpu = vcpu;
> +	mutex_unlock(&kvm->lock);
>  	return r;
>  
> -unlink:
> -	mutex_lock(&kvm->lock);
> -	kvm->vcpus[n] = NULL;
>  vcpu_destroy:
>  	mutex_unlock(&kvm->lock);
>  	kvm_arch_vcpu_destroy(vcpu);
> @@ -2223,6 +2226,10 @@ static long kvm_vm_ioctl(struct file *filp,
>  		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
>  		break;
>  	}
> +	case KVM_SET_BOOT_CPU_ID:
> +		kvm->bsp_vcpu_id = arg;
> +		r = 0;
> +		break;

Don't you also need to update the bsp_vcpu pointer? And aren't these two
(bsp_vcpu pointer and bsp_vcpu_id) somewhat redundant? 

Other than that, it looks fine.

>  	default:
>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
>  	}
> @@ -2289,6 +2296,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>  	case KVM_CAP_USER_MEMORY:
>  	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
>  	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
> +	case KVM_CAP_SET_BOOT_CPU_ID:
>  		return 1;
>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>  	case KVM_CAP_IRQ_ROUTING:
> -- 
> 1.6.2.1
> 
Gleb Natapov June 2, 2009, 1:42 p.m. UTC | #2
On Tue, Jun 02, 2009 at 09:38:08AM -0300, Marcelo Tosatti wrote:
> > @@ -2223,6 +2226,10 @@ static long kvm_vm_ioctl(struct file *filp,
> >  		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
> >  		break;
> >  	}
> > +	case KVM_SET_BOOT_CPU_ID:
> > +		kvm->bsp_vcpu_id = arg;
> > +		r = 0;
> > +		break;
> 
> Don't you also need to update the bsp_vcpu pointer? And aren't these two
Actually I think I need to return an error if userspace tries to change
the BSP after the BSP vcpu has already been created.
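Something along these lines, perhaps (just a sketch, not what the patch
currently does; -EBUSY is one plausible errno for this):

	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;	/* too late to change the BSP */
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;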

> (bsp_vcpu pointer and bsp_vcpu_id) somewhat redundant? 
> 
Sometimes a pointer to the BSP vcpu is needed. It can be found by looping
over all vcpus, but I prefer to have the pointer handy.
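E.g. something like this hypothetical helper (not in the patch, just to
illustrate the loop that caching the pointer avoids):

static struct kvm_vcpu *kvm_bsp_vcpu(struct kvm *kvm)
{
	int i;

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		if (kvm->vcpus[i]->vcpu_id == kvm->bsp_vcpu_id)
			return kvm->vcpus[i];

	return NULL;
}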

--
			Gleb.

Patch

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 632a856..d10ab5d 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -430,6 +430,7 @@  struct kvm_trace_rec {
 #ifdef __KVM_HAVE_PIT
 #define KVM_CAP_PIT2 33
 #endif
+#define KVM_CAP_SET_BOOT_CPU_ID 34
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -537,6 +538,7 @@  struct kvm_irqfd {
 #define KVM_DEASSIGN_DEV_IRQ       _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
 #define KVM_IRQFD                  _IOW(KVMIO, 0x76, struct kvm_irqfd)
 #define KVM_CREATE_PIT2		   _IOW(KVMIO, 0x77, struct kvm_pit_config)
+#define KVM_SET_BOOT_CPU_ID        _IO(KVMIO, 0x78)
 
 /*
  * ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e9e0cd8..e368a14 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -130,8 +130,10 @@  struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a55fe0..d65c637 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -693,11 +693,6 @@  out:
 }
 #endif
 
-static inline int valid_vcpu(int n)
-{
-	return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -1713,15 +1708,12 @@  static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
 	struct kvm_vcpu *vcpu;
 
-	if (!valid_vcpu(n))
-		return -EINVAL;
-
-	vcpu = kvm_arch_vcpu_create(kvm, n);
+	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);
 
@@ -1732,25 +1724,36 @@  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		return r;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->vcpus[n]) {
-		r = -EEXIST;
+	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+		r = -EINVAL;
 		goto vcpu_destroy;
 	}
-	kvm->vcpus[n] = vcpu;
-	if (n == 0)
-		kvm->bsp_vcpu = vcpu;
-	mutex_unlock(&kvm->lock);
+
+	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
+		if (kvm->vcpus[r]->vcpu_id == id) {
+			r = -EEXIST;
+			goto vcpu_destroy;
+		}
+
+	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
 
 	/* Now it's all set up, let userspace reach it */
 	kvm_get_kvm(kvm);
 	r = create_vcpu_fd(vcpu);
-	if (r < 0)
-		goto unlink;
+	if (r < 0) {
+		kvm_put_kvm(kvm);
+		goto vcpu_destroy;
+	}
+
+	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+	smp_wmb();
+	atomic_inc(&kvm->online_vcpus);
+
+	if (kvm->bsp_vcpu_id == id)
+		kvm->bsp_vcpu = vcpu;
+	mutex_unlock(&kvm->lock);
 	return r;
 
-unlink:
-	mutex_lock(&kvm->lock);
-	kvm->vcpus[n] = NULL;
 vcpu_destroy:
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_destroy(vcpu);
@@ -2223,6 +2226,10 @@  static long kvm_vm_ioctl(struct file *filp,
 		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
 		break;
 	}
+	case KVM_SET_BOOT_CPU_ID:
+		kvm->bsp_vcpu_id = arg;
+		r = 0;
+		break;
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
@@ -2289,6 +2296,7 @@  static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+	case KVM_CAP_SET_BOOT_CPU_ID:
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING: