vhost/vsock: switch to a mutex for vhost_vsock_hash

Message ID: 20181105173322.3463-1-stefanha@redhat.com
State: New, archived
Series: vhost/vsock: switch to a mutex for vhost_vsock_hash

Commit Message

Stefan Hajnoczi Nov. 5, 2018, 5:33 p.m. UTC
Now that there are no more data path users of vhost_vsock_lock, it can
be turned into a mutex.  It's only used by .release() and in the
.ioctl() path.

Depends-on: <20181105103547.22018-1-stefanha@redhat.com>
Suggested-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
Hi Jason,
Thanks for pointing out that spin_lock_bh() is no longer necessary.
Let's switch to a mutex.
---
 drivers/vhost/vsock.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
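
The locking rule behind this change can be shown with a short sketch (illustrative only, not the driver's exact code: example_set_cid() and example_lookup() are hypothetical names, the diff's duplicate-CID check is omitted, and the driver's vhost_vsock_mutex, vhost_vsock_hash, and struct vhost_vsock definitions from the patch are assumed). Writes to vhost_vsock_hash happen in process context and serialize on vhost_vsock_mutex, while lookups that merely dereference an entry rely on the RCU read lock, so the data path takes no lock at all.

#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/* Writer side (process context, may sleep): an .ioctl() registering a CID
 * serializes against other writers with the mutex, and hash_add_rcu()
 * publishes the entry safely for concurrent RCU readers.
 */
static int example_set_cid(struct vhost_vsock *vsock, u32 guest_cid)
{
        mutex_lock(&vhost_vsock_mutex);
        vsock->guest_cid = guest_cid;
        hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
        mutex_unlock(&vhost_vsock_mutex);
        return 0;
}

/* Reader side: callers hold rcu_read_lock() (or the mutex) around both the
 * lookup and any use of the returned pointer, as the comment above
 * vhost_vsock_get() in the diff requires.
 */
static struct vhost_vsock *example_lookup(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid)
                if (vsock->guest_cid == guest_cid)
                        return vsock;

        return NULL;
}

Because every remaining user of the lock runs in a context that may sleep, nothing is lost by trading spin_lock_bh() for mutex_lock(), and the BH-disabling overhead disappears from the .ioctl() and .release() paths.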

Comments

Jason Wang Nov. 6, 2018, 2:33 a.m. UTC | #1
On 2018/11/6 1:33 a.m., Stefan Hajnoczi wrote:
> Now that there are no more data path users of vhost_vsock_lock, it can
> be turned into a mutex.  It's only used by .release() and in the
> .ioctl() path.
>
> Depends-on: <20181105103547.22018-1-stefanha@redhat.com>
> Suggested-by: Jason Wang <jasowang@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> Hi Jason,
> Thanks for pointing out that spin_lock_bh() is no longer necessary.
> Let's switch to a mutex.
> ---
>   drivers/vhost/vsock.c | 16 ++++++++--------
>   1 file changed, 8 insertions(+), 8 deletions(-)


Acked-by: Jason Wang <jasowang@redhat.com>
Stefan Hajnoczi Nov. 29, 2018, 2:03 p.m. UTC | #2
On Mon, Nov 05, 2018 at 05:33:22PM +0000, Stefan Hajnoczi wrote:
> Now that there are no more data path users of vhost_vsock_lock, it can
> be turned into a mutex.  It's only used by .release() and in the
> .ioctl() path.
> 
> Depends-on: <20181105103547.22018-1-stefanha@redhat.com>
> Suggested-by: Jason Wang <jasowang@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> Hi Jason,
> Thanks for pointing out that spin_lock_bh() is no longer necessary.
> Let's switch to a mutex.
> ---
>  drivers/vhost/vsock.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)

Ping?

Michael S. Tsirkin Nov. 29, 2018, 2:33 p.m. UTC | #3
On Thu, Nov 29, 2018 at 02:03:26PM +0000, Stefan Hajnoczi wrote:
> On Mon, Nov 05, 2018 at 05:33:22PM +0000, Stefan Hajnoczi wrote:
> > Now that there are no more data path users of vhost_vsock_lock, it can
> > be turned into a mutex.  It's only used by .release() and in the
> > .ioctl() path.
> > 
> > Depends-on: <20181105103547.22018-1-stefanha@redhat.com>
> > Suggested-by: Jason Wang <jasowang@redhat.com>
> > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > ---
> > Hi Jason,
> > Thanks for pointing out that spin_lock_bh() is no longer necessary.
> > Let's switch to a mutex.
> > ---
> >  drivers/vhost/vsock.c | 16 ++++++++--------
> >  1 file changed, 8 insertions(+), 8 deletions(-)
> 
> Ping?

I'll put it on the next tree for 4.21.
Thanks!



Patch

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 51879ed18652..d95b0c04cc9b 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -27,14 +27,14 @@  enum {
 };
 
 /* Used to track all the vhost_vsock instances on the system. */
-static DEFINE_SPINLOCK(vhost_vsock_lock);
+static DEFINE_MUTEX(vhost_vsock_mutex);
 static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 
 struct vhost_vsock {
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[2];
 
-	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
 	struct hlist_node hash;
 
 	struct vhost_work send_pkt_work;
@@ -51,7 +51,7 @@  static u32 vhost_transport_get_local_cid(void)
 	return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-/* Callers that dereference the return value must hold vhost_vsock_lock or the
+/* Callers that dereference the return value must hold vhost_vsock_mutex or the
  * RCU read lock.
  */
 static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
@@ -576,10 +576,10 @@  static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
 {
 	struct vhost_vsock *vsock = file->private_data;
 
-	spin_lock_bh(&vhost_vsock_lock);
+	mutex_lock(&vhost_vsock_mutex);
 	if (vsock->guest_cid)
 		hash_del_rcu(&vsock->hash);
-	spin_unlock_bh(&vhost_vsock_lock);
+	mutex_unlock(&vhost_vsock_mutex);
 
 	/* Wait for other CPUs to finish using vsock */
 	synchronize_rcu();
@@ -623,10 +623,10 @@  static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 		return -EINVAL;
 
 	/* Refuse if CID is already in use */
-	spin_lock_bh(&vhost_vsock_lock);
+	mutex_lock(&vhost_vsock_mutex);
 	other = vhost_vsock_get(guest_cid);
 	if (other && other != vsock) {
-		spin_unlock_bh(&vhost_vsock_lock);
+		mutex_unlock(&vhost_vsock_mutex);
 		return -EADDRINUSE;
 	}
 
@@ -635,7 +635,7 @@  static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 
 	vsock->guest_cid = guest_cid;
 	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
-	spin_unlock_bh(&vhost_vsock_lock);
+	mutex_unlock(&vhost_vsock_mutex);
 
 	return 0;
 }
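
As a footnote on the teardown ordering in vhost_vsock_dev_release() above, a minimal sketch of why the mutex plus RCU is enough on release (example_release() is a hypothetical name; the real function also performs vhost device cleanup): the entry is unlinked from the hash under the mutex, then synchronize_rcu() waits for every reader currently inside an RCU read-side critical section before the object can be freed.

#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical teardown mirroring the ordering in vhost_vsock_dev_release():
 * unlink under the mutex, wait out RCU readers, then free.
 */
static void example_release(struct vhost_vsock *vsock)
{
        mutex_lock(&vhost_vsock_mutex);
        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);   /* existing RCU readers may still see the entry */
        mutex_unlock(&vhost_vsock_mutex);

        synchronize_rcu();                    /* after this, no reader can still hold a pointer */

        kfree(vsock);                         /* sketch only; the real cleanup is more involved */
}

Note that synchronize_rcu() itself may sleep, so this path is only legal in process context, which is the same property that lets the spinlock become a mutex in the first place.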