diff mbox

[RFC,v2,3/4] Break dependency between vcpu index in vcpus array and vcpu_id.

Message ID 1243266636-16914-4-git-send-email-gleb@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Gleb Natapov May 25, 2009, 3:50 p.m. UTC
Archs are free to use vcpu_id as they see fit. For x86 it is used as
vcpu's apic id.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 include/linux/kvm_host.h |    1 +
 virt/kvm/kvm_main.c      |   45 ++++++++++++++++++++++++---------------------
 2 files changed, 25 insertions(+), 21 deletions(-)

Comments

Avi Kivity May 26, 2009, 8:34 a.m. UTC | #1
Gleb Natapov wrote:
> Archs are free to use vcpu_id as they see fit. For x86 it is used as
> vcpu's apic id.
>
>   

You need a KVM_CAP to inform userspace that the vcpu id has changed meaning.

>  inline int kvm_is_mmio_pfn(pfn_t pfn)
>  {
>  	if (pfn_valid(pfn)) {
> @@ -1713,15 +1708,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
>  /*
>   * Creates some virtual cpus.  Good luck creating more than one.
>   */
> -static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
> +static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
>  {
>  	int r;
>  	struct kvm_vcpu *vcpu;
>  
> -	if (!valid_vcpu(n))
> -		return -EINVAL;
> -
> -	vcpu = kvm_arch_vcpu_create(kvm, n);
> +	vcpu = kvm_arch_vcpu_create(kvm, id);
>  	if (IS_ERR(vcpu))
>  		return PTR_ERR(vcpu);
>  
> @@ -1732,25 +1724,36 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>  		return r;
>  
>  	mutex_lock(&kvm->lock);
> -	if (kvm->vcpus[n]) {
> -		r = -EEXIST;
> +	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
> +		r = -EINVAL;
>  		goto vcpu_destroy;
>  	}
> -	kvm->vcpus[n] = vcpu;
> -	if (n == 0)
> -		kvm->bsp_vcpu = vcpu;
> -	mutex_unlock(&kvm->lock);
> +
> +	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
> +		if (kvm->vcpus[r]->vcpu_id == id) {
> +			r = -EEXIST;
> +			goto vcpu_destroy;
> +		}
> +
> +	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
>  
>  	/* Now it's all set up, let userspace reach it */
>  	kvm_get_kvm(kvm);
>  	r = create_vcpu_fd(vcpu);
> -	if (r < 0)
> -		goto unlink;
> +	if (r < 0) {
> +		kvm_put_kvm(kvm);
> +		goto vcpu_destroy;
> +	}
> +
> +	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
> +	smp_wmb();
> +	atomic_inc(&kvm->online_vcpus);
> +
> +	if (id == 0)
> +		kvm->bsp_vcpu = vcpu;
> +	mutex_unlock(&kvm->lock);
>  	return r;
>  
> -unlink:
> -	mutex_lock(&kvm->lock);
> -	kvm->vcpus[n] = NULL;
>  vcpu_destroy:
>  	mutex_unlock(&kvm->lock);
>  	kvm_arch_vcpu_destroy(vcpu);
>   

Don't the vcpu ioctls need to be updated?  They get the vcpu id as a 
parameter.
Gleb Natapov May 26, 2009, 8:55 a.m. UTC | #2
On Tue, May 26, 2009 at 11:34:01AM +0300, Avi Kivity wrote:
> Gleb Natapov wrote:
>> Archs are free to use vcpu_id as they see fit. For x86 it is used as
>> vcpu's apic id.
>>
>>   
>
> You need a KVM_CAP to inform userspace that the vcpu id has changed meaning.
>
Will add.

>>  inline int kvm_is_mmio_pfn(pfn_t pfn)
>>  {
>>  	if (pfn_valid(pfn)) {
>> @@ -1713,15 +1708,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
>>  /*
>>   * Creates some virtual cpus.  Good luck creating more than one.
>>   */
>> -static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>> +static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
>>  {
>>  	int r;
>>  	struct kvm_vcpu *vcpu;
>>  
>> -	if (!valid_vcpu(n))
>> -		return -EINVAL;
>> -
>> -	vcpu = kvm_arch_vcpu_create(kvm, n);
>> +	vcpu = kvm_arch_vcpu_create(kvm, id);
>>  	if (IS_ERR(vcpu))
>>  		return PTR_ERR(vcpu);
>>  
>> @@ -1732,25 +1724,36 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
>>  		return r;
>>  
>>  	mutex_lock(&kvm->lock);
>> -	if (kvm->vcpus[n]) {
>> -		r = -EEXIST;
>> +	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
>> +		r = -EINVAL;
>>  		goto vcpu_destroy;
>>  	}
>> -	kvm->vcpus[n] = vcpu;
>> -	if (n == 0)
>> -		kvm->bsp_vcpu = vcpu;
>> -	mutex_unlock(&kvm->lock);
>> +
>> +	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
>> +		if (kvm->vcpus[r]->vcpu_id == id) {
>> +			r = -EEXIST;
>> +			goto vcpu_destroy;
>> +		}
>> +
>> +	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
>>  
>>  	/* Now it's all set up, let userspace reach it */
>>  	kvm_get_kvm(kvm);
>>  	r = create_vcpu_fd(vcpu);
>> -	if (r < 0)
>> -		goto unlink;
>> +	if (r < 0) {
>> +		kvm_put_kvm(kvm);
>> +		goto vcpu_destroy;
>> +	}
>> +
>> +	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
>> +	smp_wmb();
>> +	atomic_inc(&kvm->online_vcpus);
>> +
>> +	if (id == 0)
>> +		kvm->bsp_vcpu = vcpu;
>> +	mutex_unlock(&kvm->lock);
>>  	return r;
>>  
>> -unlink:
>> -	mutex_lock(&kvm->lock);
>> -	kvm->vcpus[n] = NULL;
>>  vcpu_destroy:
>>  	mutex_unlock(&kvm->lock);
>>  	kvm_arch_vcpu_destroy(vcpu);
>>   
>
> Don't the vcpu ioctls need to be updated?  They get the vcpu id as a  
> parameter.
>
Are you sure they do? vcpu ioctls are issued on vcpu fd, no need to pass
vcpu id as a parameter.

--
			Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Avi Kivity May 26, 2009, 8:57 a.m. UTC | #3
Gleb Natapov wrote:

  

>> Don't the vcpu ioctls need to be updated?  They get the vcpu id as a  
>> parameter.
>>
>>     
> Are you sure they do? vcpu ioctls are issued on vcpu fd, no need to pass
> vcpu id as a parameter.
>
>   

You're right, I was confused with an earlier version of the interface, 
and with libkvm.

Moral: handle-based design = good.
diff mbox

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e9e0cd8..0934df3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -132,6 +132,7 @@  struct kvm {
 					KVM_PRIVATE_MEM_SLOTS];
 	struct kvm_vcpu *bsp_vcpu;
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a55fe0..8f3fff8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -693,11 +693,6 @@  out:
 }
 #endif
 
-static inline int valid_vcpu(int n)
-{
-	return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -1713,15 +1708,12 @@  static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
 	struct kvm_vcpu *vcpu;
 
-	if (!valid_vcpu(n))
-		return -EINVAL;
-
-	vcpu = kvm_arch_vcpu_create(kvm, n);
+	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);
 
@@ -1732,25 +1724,36 @@  static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		return r;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->vcpus[n]) {
-		r = -EEXIST;
+	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+		r = -EINVAL;
 		goto vcpu_destroy;
 	}
-	kvm->vcpus[n] = vcpu;
-	if (n == 0)
-		kvm->bsp_vcpu = vcpu;
-	mutex_unlock(&kvm->lock);
+
+	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
+		if (kvm->vcpus[r]->vcpu_id == id) {
+			r = -EEXIST;
+			goto vcpu_destroy;
+		}
+
+	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
 
 	/* Now it's all set up, let userspace reach it */
 	kvm_get_kvm(kvm);
 	r = create_vcpu_fd(vcpu);
-	if (r < 0)
-		goto unlink;
+	if (r < 0) {
+		kvm_put_kvm(kvm);
+		goto vcpu_destroy;
+	}
+
+	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+	smp_wmb();
+	atomic_inc(&kvm->online_vcpus);
+
+	if (id == 0)
+		kvm->bsp_vcpu = vcpu;
+	mutex_unlock(&kvm->lock);
 	return r;
 
-unlink:
-	mutex_lock(&kvm->lock);
-	kvm->vcpus[n] = NULL;
 vcpu_destroy:
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_destroy(vcpu);