
[v9,1/3] tcm_vhost: Refactor the lock nesting rule

Message ID 1366859406-8963-2-git-send-email-asias@redhat.com (mailing list archive)
State New, archived

Commit Message

Asias He April 25, 2013, 3:10 a.m. UTC
We want to use tcm_vhost_mutex to make sure hotplug/hotunplug will not
happen while set_endpoint/clear_endpoint is in progress.

Signed-off-by: Asias He <asias@redhat.com>
---
 drivers/vhost/tcm_vhost.c | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)
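
For orientation, the serialization this patch is after can be sketched in a few
lines: both the endpoint ioctl path and the target hotplug/hotunplug path take
the global tcm_vhost_mutex first, so neither can run while the other is in
progress. This is only an illustration, not code from the driver; tcm_vhost_mutex
and the lock names in the nesting comment come from the patch below, while the
two function names are made up for the sketch.

#include <linux/mutex.h>

static DEFINE_MUTEX(tcm_vhost_mutex);	/* global, as in tcm_vhost.c */

/* Hypothetical stand-in for vhost_scsi_set_endpoint()/_clear_endpoint(). */
static int endpoint_path_sketch(void)
{
	mutex_lock(&tcm_vhost_mutex);	/* outermost lock, taken before vs->dev.mutex */
	/* ... take vs->dev.mutex, walk the tpg list, update vs->vs_tpg ... */
	mutex_unlock(&tcm_vhost_mutex);
	return 0;
}

/* Hypothetical stand-in for the hotplug/hotunplug path that adds or
 * removes a tcm_vhost_tpg from tcm_vhost_list. */
static void hotplug_path_sketch(void)
{
	mutex_lock(&tcm_vhost_mutex);	/* blocks until any endpoint change finishes */
	/* ... add or remove a tpg ... */
	mutex_unlock(&tcm_vhost_mutex);
}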

Comments

Michael S. Tsirkin April 25, 2013, 5:44 a.m. UTC | #1
On Thu, Apr 25, 2013 at 11:10:04AM +0800, Asias He wrote:
> We want to use tcm_vhost_mutex to make sure hotplug/hotunplug will not
> happen while set_endpoint/clear_endpoint is in progress.
> 
> Signed-off-by: Asias He <asias@redhat.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>


Patch

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 957a0b9..822cd1f 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -808,6 +808,9 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ *
+ *  The lock nesting rule is:
+ *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int vhost_scsi_set_endpoint(
 	struct vhost_scsi *vs,
@@ -820,26 +823,27 @@ static int vhost_scsi_set_endpoint(
 	int index, ret, i, len;
 	bool match = false;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
+
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
 		if (!vhost_vq_access_ok(&vs->vqs[index])) {
-			mutex_unlock(&vs->dev.mutex);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto out;
 		}
 	}
 
 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
 	vs_tpg = kzalloc(len, GFP_KERNEL);
 	if (!vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	if (vs->vs_tpg)
 		memcpy(vs_tpg, vs->vs_tpg, len);
 
-	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
 		if (!tv_tpg->tpg_nexus) {
@@ -854,11 +858,10 @@ static int vhost_scsi_set_endpoint(
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
-				mutex_unlock(&tv_tpg->tv_tpg_mutex);
-				mutex_unlock(&tcm_vhost_mutex);
-				mutex_unlock(&vs->dev.mutex);
 				kfree(vs_tpg);
-				return -EEXIST;
+				mutex_unlock(&tv_tpg->tv_tpg_mutex);
+				ret = -EEXIST;
+				goto out;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
 			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
@@ -867,7 +870,6 @@ static int vhost_scsi_set_endpoint(
 		}
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
-	mutex_unlock(&tcm_vhost_mutex);
 
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -893,7 +895,9 @@ static int vhost_scsi_set_endpoint(
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
 
+out:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
@@ -908,6 +912,7 @@ static int vhost_scsi_clear_endpoint(
 	int index, ret, i;
 	u8 target;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -918,8 +923,8 @@ static int vhost_scsi_clear_endpoint(
 	}
 
 	if (!vs->vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return 0;
+		ret = 0;
+		goto err_dev;
 	}
 
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -965,13 +970,14 @@ static int vhost_scsi_clear_endpoint(
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
-
+	mutex_unlock(&tcm_vhost_mutex);
 	return 0;
 
 err_tpg:
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 err_dev:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
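
The comment added in this patch states the nesting rule; condensed into one
place, with the reverse-order (LIFO) release that the out:/err_dev:/err_tpg:
labels implement, it looks roughly like the sketch below. The struct and mutex
names match the driver, but the function itself is hypothetical; no real path
in the driver holds all four locks at once, the rule only fixes the order
whenever two or more of them are nested.

/* Illustration only: acquire top-down, release bottom-up. */
static void lock_nesting_sketch(struct vhost_scsi *vs,
				struct tcm_vhost_tpg *tpg,
				struct vhost_virtqueue *vq)
{
	mutex_lock(&tcm_vhost_mutex);		/* 1. global hotplug lock */
	mutex_lock(&vs->dev.mutex);		/* 2. per-device lock */
	mutex_lock(&tpg->tv_tpg_mutex);		/* 3. per-target-port-group lock */
	mutex_lock(&vq->mutex);			/* 4. per-virtqueue lock */

	/* ... critical section ... */

	mutex_unlock(&vq->mutex);
	mutex_unlock(&tpg->tv_tpg_mutex);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
}

Releasing in the opposite order of acquisition keeps the goto unwind paths in
vhost_scsi_set_endpoint() and vhost_scsi_clear_endpoint() simple and avoids
ABBA deadlocks between the endpoint ioctls and hotplug.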