
[4/5] nvmet-rdma: add a NVMe over Fabrics RDMA target driver

Message ID 051801d1c297$c7d8a7d0$5789f770$@opengridcomputing.com (mailing list archive)
State Not Applicable

Commit Message

Steve Wise June 9, 2016, 9:42 p.m. UTC
<snip>

> > +
> > +static struct nvmet_rdma_queue *
> > +nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
> > +		struct rdma_cm_id *cm_id,
> > +		struct rdma_cm_event *event)
> > +{
> > +	struct nvmet_rdma_queue *queue;
> > +	int ret;
> > +
> > +	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> > +	if (!queue) {
> > +		ret = NVME_RDMA_CM_NO_RSC;
> > +		goto out_reject;
> > +	}
> > +
> > +	ret = nvmet_sq_init(&queue->nvme_sq);
> > +	if (ret)
> > +		goto out_free_queue;
> > +
> > +	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
> > +	if (ret)
> > +		goto out_destroy_sq;
> > +
> > +	/*
> > +	 * Schedules the actual release because calling rdma_destroy_id from
> > +	 * inside a CM callback would trigger a deadlock. (great API design..)
> > +	 */
> > +	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
> > +	queue->dev = ndev;
> > +	queue->cm_id = cm_id;
> > +
> > +	spin_lock_init(&queue->state_lock);
> > +	queue->state = NVMET_RDMA_Q_CONNECTING;
> > +	INIT_LIST_HEAD(&queue->rsp_wait_list);
> > +	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
> > +	spin_lock_init(&queue->rsp_wr_wait_lock);
> > +	INIT_LIST_HEAD(&queue->free_rsps);
> > +	spin_lock_init(&queue->rsps_lock);
> > +
> > +	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
> > +	if (queue->idx < 0) {
> > +		ret = NVME_RDMA_CM_NO_RSC;
> > +		goto out_free_queue;
> > +	}
> > +
> > +	ret = nvmet_rdma_alloc_rsps(queue);
> > +	if (ret) {
> > +		ret = NVME_RDMA_CM_NO_RSC;
> > +		goto out_ida_remove;
> > +	}
> > +
> > +	if (!ndev->srq) {
> > +		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
> > +				queue->recv_queue_size,
> > +				!queue->host_qid);
> > +		if (IS_ERR(queue->cmds)) {
> > +			ret = NVME_RDMA_CM_NO_RSC;
> > +			goto out_free_cmds;
> > +		}
> > +	}
> > +

Should the above error path actually goto a block that frees the rsps?  Like
this?
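
(The diff Steve is proposing is quoted in the replies below and shown in full
under "Patch" at the bottom of this page.)  The pattern at issue is the usual
kernel goto-unwind: each setup step gets a cleanup label, and a failure jumps
to the label that undoes everything already set up, in reverse order.  Sketched
against the calls quoted above, using the label names from that diff and with
the intermediate error checks omitted, the pairing looks roughly like this; it
illustrates the ordering only and is not the verbatim driver source:

	ret = nvmet_sq_init(&queue->nvme_sq);		/* undone at out_destroy_sq */
	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida,
				    0, 0, GFP_KERNEL);	/* undone at out_ida_remove */
	ret = nvmet_rdma_alloc_rsps(queue);		/* undone at out_free_responses */
	queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);	/* undone at out_free_cmds */

	/*
	 * nvmet_rdma_alloc_cmds() is the last of these steps, so its
	 * failure path has to unwind everything above it, starting with
	 * the responses:
	 */
	if (IS_ERR(queue->cmds)) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_responses;
	}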




Comments

Ming Lin June 9, 2016, 9:54 p.m. UTC | #1
On Thu, Jun 9, 2016 at 2:42 PM, Steve Wise <swise@opengridcomputing.com> wrote:

> Should the above error path actually goto a block that frees the rsps?  Like
> this?
>
> diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
> index c184ee5..8aaa36f 100644
> --- a/drivers/nvme/target/rdma.c
> +++ b/drivers/nvme/target/rdma.c
> @@ -1053,7 +1053,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
>                                 !queue->host_qid);
>                 if (IS_ERR(queue->cmds)) {
>                         ret = NVME_RDMA_CM_NO_RSC;
> -                       goto out_free_cmds;
> +                       goto out_free_responses;
>                 }
>         }
>
> @@ -1073,6 +1073,8 @@ out_free_cmds:
>                                 queue->recv_queue_size,
>                                 !queue->host_qid);
>         }
> +out_free_responses:
> +        nvmet_rdma_free_rsps(queue);
>  out_ida_remove:
>         ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
>  out_destroy_sq:

Yes. Nice catch.
Christoph Hellwig June 14, 2016, 2:32 p.m. UTC | #2
On Thu, Jun 09, 2016 at 04:42:11PM -0500, Steve Wise wrote:
> 
> <snip>
> 
> > > +
> > > +static struct nvmet_rdma_queue *
> > > +nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
> > > +		struct rdma_cm_id *cm_id,
> > > +		struct rdma_cm_event *event)
> > > +{
> > > +	struct nvmet_rdma_queue *queue;
> > > +	int ret;
> > > +
> > > +	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> > > +	if (!queue) {
> > > +		ret = NVME_RDMA_CM_NO_RSC;
> > > +		goto out_reject;
> > > +	}
> > > +
> > > +	ret = nvmet_sq_init(&queue->nvme_sq);
> > > +	if (ret)
> > > +		goto out_free_queue;
> > > +
> > > +	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
> > > +	if (ret)
> > > +		goto out_destroy_sq;
> > > +
> > > +	/*
> > > +	 * Schedules the actual release because calling rdma_destroy_id from
> > > +	 * inside a CM callback would trigger a deadlock. (great API design..)
> > > +	 */
> > > +	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
> > > +	queue->dev = ndev;
> > > +	queue->cm_id = cm_id;
> > > +
> > > +	spin_lock_init(&queue->state_lock);
> > > +	queue->state = NVMET_RDMA_Q_CONNECTING;
> > > +	INIT_LIST_HEAD(&queue->rsp_wait_list);
> > > +	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
> > > +	spin_lock_init(&queue->rsp_wr_wait_lock);
> > > +	INIT_LIST_HEAD(&queue->free_rsps);
> > > +	spin_lock_init(&queue->rsps_lock);
> > > +
> > > +	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
> > > +	if (queue->idx < 0) {
> > > +		ret = NVME_RDMA_CM_NO_RSC;
> > > +		goto out_free_queue;
> > > +	}
> > > +
> > > +	ret = nvmet_rdma_alloc_rsps(queue);
> > > +	if (ret) {
> > > +		ret = NVME_RDMA_CM_NO_RSC;
> > > +		goto out_ida_remove;
> > > +	}
> > > +
> > > +	if (!ndev->srq) {
> > > +		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
> > > +				queue->recv_queue_size,
> > > +				!queue->host_qid);
> > > +		if (IS_ERR(queue->cmds)) {
> > > +			ret = NVME_RDMA_CM_NO_RSC;
> > > +			goto out_free_cmds;
> > > +		}
> > > +	}
> > > +
> 
> Should the above error path actually goto a block that frees the rsps?  Like
> this?

Yes, this looks good.  Thanks a lot, I'll include it when reposting.

Patch

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index c184ee5..8aaa36f 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1053,7 +1053,7 @@  nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
                                !queue->host_qid);
                if (IS_ERR(queue->cmds)) {
                        ret = NVME_RDMA_CM_NO_RSC;
-                       goto out_free_cmds;
+                       goto out_free_responses;
                }
        }

@@ -1073,6 +1073,8 @@  out_free_cmds:
                                queue->recv_queue_size,
                                !queue->host_qid);
        }
+out_free_responses:
+        nvmet_rdma_free_rsps(queue);
 out_ida_remove:
        ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
 out_destroy_sq:
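
With those two hunks applied, the error-unwind tail of nvmet_rdma_alloc_queue()
reads roughly as follows.  The out_free_cmds, out_free_responses and
out_ida_remove parts follow the hunks above (the out_free_cmds body is completed
from the matching nvmet_rdma_alloc_cmds() call); the bodies of out_destroy_sq,
out_free_queue and out_reject are not shown anywhere in this thread and are
reconstructed here from the setup calls quoted earlier, so treat this as an
approximation rather than the posted patch:

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(ndev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);	/* assumed counterpart of nvmet_sq_init() */
out_free_queue:
	kfree(queue);				/* releases the kzalloc()ed queue */
out_reject:
	/* assumed helper: reject the CM connect request with 'ret' as the status */
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;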