
rbd: protect against duplicate client creation

Message ID 51A66A29.6060805@inktank.com (mailing list archive)
State New, archived

Commit Message

Alex Elder May 29, 2013, 8:50 p.m. UTC
If more than one rbd image has the same ceph cluster configuration
(same options, same set of monitors, same keys) they normally share
a single rbd client.

When an image is getting mapped, rbd looks to see if an existing
client can be used, and creates a new one if not.

The lookup and creation are not done under a common lock though, so
mapping two images concurrently could lead to duplicate clients
getting set up needlessly.  This isn't a major problem, but it's
wasteful and different from what's intended.

This patch fixes that by using the control mutex to protect
both the lookup and (if needed) creation of the client.  It
was previously used just when creating.

This resolves:
    http://tracker.ceph.com/issues/3094

Signed-off-by: Alex Elder <elder@inktank.com>
---
 drivers/block/rbd.c |    7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index aec2438..d255541 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -522,7 +522,7 @@ static const struct block_device_operations rbd_bd_ops = {

 /*
  * Initialize an rbd client instance.  Success or not, this function
- * consumes ceph_opts.
+ * consumes ceph_opts.  Caller holds ctl_mutex
  */
 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 {
@@ -537,8 +537,6 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 	kref_init(&rbdc->kref);
 	INIT_LIST_HEAD(&rbdc->node);

-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
 	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
 	if (IS_ERR(rbdc->client))
 		goto out_mutex;
@@ -552,7 +550,6 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 	list_add_tail(&rbdc->node, &rbd_client_list);
 	spin_unlock(&rbd_client_list_lock);

-	mutex_unlock(&ctl_mutex);
 	dout("%s: rbdc %p\n", __func__, rbdc);

 	return rbdc;
@@ -684,11 +681,13 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 {
 	struct rbd_client *rbdc;

+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 	rbdc = rbd_client_find(ceph_opts);
 	if (rbdc)	/* using an existing client */
 		ceph_destroy_options(ceph_opts);
 	else
 		rbdc = rbd_client_create(ceph_opts);
+	mutex_unlock(&ctl_mutex);

 	return rbdc;
 }
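
For readers less familiar with the pattern, here is a minimal user-space
sketch of the find-or-create-under-one-lock idiom the patch moves to.  It
is illustrative only, not the kernel code: struct client, client_find(),
client_create() and client_get() are hypothetical stand-ins for rbd_client,
rbd_client_find(), rbd_client_create() and rbd_get_client(), and a pthread
mutex stands in for ctl_mutex.  Because the lookup and the creation both
run with the same mutex held, two concurrent callers cannot both miss the
lookup and end up creating duplicate clients.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct client {
        char key[64];                   /* stands in for the ceph options */
        struct client *next;
        int refcount;
};

static struct client *client_list;
static pthread_mutex_t ctl_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds ctl_mutex. */
static struct client *client_find(const char *key)
{
        struct client *c;

        for (c = client_list; c; c = c->next) {
                if (!strcmp(c->key, key)) {
                        c->refcount++;  /* reuse the existing client */
                        return c;
                }
        }
        return NULL;
}

/* Caller holds ctl_mutex. */
static struct client *client_create(const char *key)
{
        struct client *c = calloc(1, sizeof(*c));

        if (!c)
                return NULL;
        strncpy(c->key, key, sizeof(c->key) - 1);
        c->refcount = 1;
        c->next = client_list;          /* register it on the global list */
        client_list = c;
        return c;
}

/*
 * Lookup and (if needed) creation happen in one critical section,
 * mirroring what rbd_get_client() does after this patch.
 */
static struct client *client_get(const char *key)
{
        struct client *c;

        pthread_mutex_lock(&ctl_mutex);
        c = client_find(key);
        if (!c)
                c = client_create(key);
        pthread_mutex_unlock(&ctl_mutex);
        return c;
}

int main(void)
{
        struct client *a = client_get("mon1,mon2 key=client.admin");
        struct client *b = client_get("mon1,mon2 key=client.admin");

        assert(a == b);         /* the second call reused the first client */
        return 0;
}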

Comments

Josh Durgin May 30, 2013, 5:36 p.m. UTC | #1
Alex Elder <elder@inktank.com> wrote:

>If more than one rbd image has the same ceph cluster configuration
>(same options, same set of monitors, same keys) they normally share
>a single rbd client.
>
>When an image is getting mapped, rbd looks to see if an existing
>client can be used, and creates a new one if not.
>
>The lookup and creation are not done under a common lock though, so
>mapping two images concurrently could lead to duplicate clients
>getting set up needlessly.  This isn't a major problem, but it's
>wasteful and different from what's intended.
>
>This patch fixes that by using the control mutex to protect
>both the lookup and (if needed) creation of the client.  It
>was previously used just when creating.
>
>This resolves:
>    http://tracker.ceph.com/issues/3094
>
>Signed-off-by: Alex Elder <elder@inktank.com>
>---
> drivers/block/rbd.c |    7 +++----
> 1 file changed, 3 insertions(+), 4 deletions(-)
>
>diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
>index aec2438..d255541 100644
>--- a/drivers/block/rbd.c
>+++ b/drivers/block/rbd.c
>@@ -522,7 +522,7 @@ static const struct block_device_operations rbd_bd_ops = {
>
> /*
>  * Initialize an rbd client instance.  Success or not, this function
>- * consumes ceph_opts.
>+ * consumes ceph_opts.  Caller holds ctl_mutex
>  */
>static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
> {
>@@ -537,8 +537,6 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
> 	kref_init(&rbdc->kref);
> 	INIT_LIST_HEAD(&rbdc->node);
>
>-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
>-
> 	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
> 	if (IS_ERR(rbdc->client))
> 		goto out_mutex;
>@@ -552,7 +550,6 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
> 	list_add_tail(&rbdc->node, &rbd_client_list);
> 	spin_unlock(&rbd_client_list_lock);
>
>-	mutex_unlock(&ctl_mutex);
> 	dout("%s: rbdc %p\n", __func__, rbdc);
>
> 	return rbdc;
>@@ -684,11 +681,13 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
> {
> 	struct rbd_client *rbdc;
>
>+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
> 	rbdc = rbd_client_find(ceph_opts);
> 	if (rbdc)	/* using an existing client */
> 		ceph_destroy_options(ceph_opts);
> 	else
> 		rbdc = rbd_client_create(ceph_opts);
>+	mutex_unlock(&ctl_mutex);
>
> 	return rbdc;
> }

Reviewed-by: Josh Durgin <josh.durgin@inktank.com>

Patch

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index aec2438..d255541 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -522,7 +522,7 @@ static const struct block_device_operations rbd_bd_ops = {

 /*
  * Initialize an rbd client instance.  Success or not, this function
- * consumes ceph_opts.
+ * consumes ceph_opts.  Caller holds ctl_mutex
  */
 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)