[3/4] nvme: separate command prep and issue

Message ID: 20211117033807.185715-4-axboe@kernel.dk
State: New, archived
Series: Add support for list issue

Commit Message

Jens Axboe Nov. 17, 2021, 3:38 a.m. UTC
Add an nvme_prep_rq() helper to set up a command, and adapt
nvme_queue_rq() to use it.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/nvme/host/pci.c | 54 +++++++++++++++++++++++++----------------
 1 file changed, 33 insertions(+), 21 deletions(-)
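
For context on the series title ("Add support for list issue"),
separating prep from issue is what allows a later patch to submit a
whole plugged list of requests with a single doorbell write. A minimal,
hypothetical sketch of such a batched path (the function name and the
list walk are assumptions for illustration, not part of this patch):

	/*
	 * Hypothetical sketch: with prep and issue split, a batched hook
	 * can prep every request on a plugged list and only ring the SQ
	 * doorbell on the final submission.
	 */
	static void nvme_queue_rqs_sketch(struct request **rqlist)
	{
		struct request *req;

		for (req = *rqlist; req; req = req->rq_next) {
			struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
			struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

			/* error handling elided for brevity */
			if (nvme_prep_rq(nvmeq->dev, req->q->queuedata, req,
					 &iod->cmd) != BLK_STS_OK)
				continue;
			/* only write the doorbell at the end of the list */
			nvme_submit_cmd(nvmeq, &iod->cmd, !req->rq_next);
		}
	}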

Comments

Christoph Hellwig Nov. 17, 2021, 6:17 a.m. UTC | #1
On Tue, Nov 16, 2021 at 08:38:06PM -0700, Jens Axboe wrote:
> +	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
> +	if (ret == BLK_STS_OK) {
> +		nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
> +		return BLK_STS_OK;
> +	}
> +	return ret;

I'd prefer the traditional handle errors outside the straight path
order here:

	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
	if (ret)
		return ret;
	nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
	return BLK_STS_OK;
Jens Axboe Nov. 17, 2021, 3:45 p.m. UTC | #2
On 11/16/21 11:17 PM, Christoph Hellwig wrote:
> On Tue, Nov 16, 2021 at 08:38:06PM -0700, Jens Axboe wrote:
>> +	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
>> +	if (ret == BLK_STS_OK) {
>> +		nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
>> +		return BLK_STS_OK;
>> +	}
>> +	return ret;
> 
> I'd prefer the traditional handle errors outside the straight path
> order here:
> 
> 	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
> 	if (ret)
> 		return ret;
> 	nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
> 	return BLK_STS_OK;

Sure, changed.
Chaitanya Kulkarni Nov. 18, 2021, 7:59 a.m. UTC | #3
On 11/16/2021 10:17 PM, Christoph Hellwig wrote:
> On Tue, Nov 16, 2021 at 08:38:06PM -0700, Jens Axboe wrote:
>> +	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
>> +	if (ret == BLK_STS_OK) {
>> +		nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
>> +		return BLK_STS_OK;
>> +	}
>> +	return ret;
> 
> I'd prefer the traditional handle errors outside the straight path
> order here:
> 
> 	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
> 	if (ret)
> 		return ret;
> 	nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
> 	return BLK_STS_OK;
> 

Perhaps consider adding unlikely() for the error condition above?

	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
	if (unlikely(ret))
		return ret;
	nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
	return BLK_STS_OK;
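
(For reference, unlikely() is the compiler branch-prediction hint from
include/linux/compiler.h; it tells GCC/Clang to lay out the error path
out of the hot path:)

	/* from include/linux/compiler.h */
	#define unlikely(x)	__builtin_expect(!!(x), 0)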

Patch

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c33cd1177b37..d2b654fc3603 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -937,18 +937,10 @@  static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
 	return BLK_STS_OK;
 }
 
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
-static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
-			 const struct blk_mq_queue_data *bd)
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct nvme_ns *ns,
+				 struct request *req, struct nvme_command *cmnd)
 {
-	struct nvme_ns *ns = hctx->queue->queuedata;
-	struct nvme_queue *nvmeq = hctx->driver_data;
-	struct nvme_dev *dev = nvmeq->dev;
-	struct request *req = bd->rq;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_command *cmnd = &iod->cmd;
 	blk_status_t ret;
 
 	iod->aborted = false;
@@ -956,16 +948,6 @@  static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	iod->npages = -1;
 	iod->nents = 0;
 
-	/*
-	 * We should not need to do this, but we're still using this to
-	 * ensure we can drain requests on a dying queue.
-	 */
-	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
-		return BLK_STS_IOERR;
-
-	if (!nvme_check_ready(&dev->ctrl, req, true))
-		return nvme_fail_nonready_command(&dev->ctrl, req);
-
 	ret = nvme_setup_cmd(ns, req);
 	if (ret)
 		return ret;
@@ -983,7 +965,6 @@  static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	blk_mq_start_request(req);
-	nvme_submit_cmd(nvmeq, cmnd, bd->last);
 	return BLK_STS_OK;
 out_unmap_data:
 	nvme_unmap_data(dev, req);
@@ -992,6 +973,37 @@  static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+			 const struct blk_mq_queue_data *bd)
+{
+	struct nvme_ns *ns = hctx->queue->queuedata;
+	struct nvme_queue *nvmeq = hctx->driver_data;
+	struct nvme_dev *dev = nvmeq->dev;
+	struct request *req = bd->rq;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	blk_status_t ret;
+
+	/*
+	 * We should not need to do this, but we're still using this to
+	 * ensure we can drain requests on a dying queue.
+	 */
+	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+		return BLK_STS_IOERR;
+
+	if (!nvme_check_ready(&dev->ctrl, req, true))
+		return nvme_fail_nonready_command(&dev->ctrl, req);
+
+	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
+	if (ret == BLK_STS_OK) {
+		nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
+		return BLK_STS_OK;
+	}
+	return ret;
+}
+
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
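
For reference, with Christoph's error-path ordering and Chaitanya's
unlikely() hint both folded in, the tail of nvme_queue_rq() in a next
version would read:

	ret = nvme_prep_rq(dev, ns, req, &iod->cmd);
	if (unlikely(ret))
		return ret;
	nvme_submit_cmd(nvmeq, &iod->cmd, bd->last);
	return BLK_STS_OK;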