
[V3,2/6] nvme-core: split nvme_alloc_request()

Message ID 20201022010234.8304-3-chaitanya.kulkarni@wdc.com (mailing list archive)
State New, archived
Series nvmet: passthru fixes and improvements

Commit Message

Chaitanya Kulkarni Oct. 22, 2020, 1:02 a.m. UTC
Right now nvme_alloc_request() allocates a request from the block layer
based on the value of qid: when qid is set to NVME_QID_ANY it uses
blk_mq_alloc_request(), otherwise blk_mq_alloc_request_hctx().

The function nvme_alloc_request() is called from several contexts. The
only callers that pass a non-NVME_QID_ANY value are the fabrics connect
commands :-

nvme_submit_sync_cmd()		NVME_QID_ANY
nvme_features()			NVME_QID_ANY
nvme_sec_submit()		NVME_QID_ANY
nvmf_reg_read32()		NVME_QID_ANY
nvmf_reg_read64()		NVME_QID_ANY
nvmf_reg_write32()		NVME_QID_ANY
nvmf_connect_admin_queue()	NVME_QID_ANY
nvme_submit_user_cmd()		NVME_QID_ANY
	nvme_alloc_request()
nvme_keep_alive()		NVME_QID_ANY
	nvme_alloc_request()
nvme_timeout()			NVME_QID_ANY
	nvme_alloc_request()
nvme_delete_queue()		NVME_QID_ANY
	nvme_alloc_request()
nvmet_passthru_execute_cmd()	NVME_QID_ANY
	nvme_alloc_request()
nvmf_connect_io_queue() 	QID
	__nvme_submit_sync_cmd()
		nvme_alloc_request()

With passthru, nvme_alloc_request() now sits in the I/O fast path, where
blk_mq_alloc_request_hctx() never gets called, yet every allocation still
pays for the qid branch check in that path.
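
For reference, a condensed sketch of the branch in question, lifted from
the current nvme_alloc_request() (the full removal appears in the diff
below):

	/* Every passthru allocation pays this qid test even though it
	 * always takes the NVME_QID_ANY arm. */
	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}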

Split nvme_alloc_request() into nvme_alloc_request_qid_any() and
nvme_alloc_request_qid().

Replace each call to nvme_alloc_request() that passes NVME_QID_ANY with
a call to the newly added nvme_alloc_request_qid_any().

In __nvme_submit_sync_cmd(), replace the remaining call to
nvme_alloc_request() with a call to either nvme_alloc_request_qid_any()
or nvme_alloc_request_qid(), depending on the qid value passed in.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
 drivers/nvme/host/core.c       | 44 +++++++++++++++++++++++-----------
 drivers/nvme/host/lightnvm.c   |  5 ++--
 drivers/nvme/host/nvme.h       |  4 ++--
 drivers/nvme/host/pci.c        |  6 ++---
 drivers/nvme/target/passthru.c |  2 +-
 5 files changed, 38 insertions(+), 23 deletions(-)

Comments

Christoph Hellwig Nov. 3, 2020, 6:24 p.m. UTC | #1
On Wed, Oct 21, 2020 at 06:02:30PM -0700, Chaitanya Kulkarni wrote:
> +static inline unsigned int nvme_req_op(struct nvme_command *cmd)
> +{
> +	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
> +}

Why is this added here while nvme_init_req_from_cmd is added in a prep
patch?  I'm actually fine either way, but doing it differently for the
different helpers is a little inconsistent.

> +
> +struct request *nvme_alloc_request_qid_any(struct request_queue *q,
> +		struct nvme_command *cmd, blk_mq_req_flags_t flags)

I'd call this just nvme_alloc_request to keep the short name for the
normal use case.

> +	struct request *req;
> +
> +	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
> +	if (unlikely(IS_ERR(req)))
> +		return req;
> +
> +	nvme_init_req_from_cmd(req, cmd);
> +	return req;

Could be simplified to:

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_req_from_cmd(req, cmd);
	return req;

Note that IS_ERR already contains an embedded unlikely().
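
For reference, a condensed sketch of why that holds, based on the
IS_ERR() definition in include/linux/err.h (the __force annotation is
dropped here for brevity):

	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

	static inline bool __must_check IS_ERR(const void *ptr)
	{
		/* The branch hint comes from IS_ERR_VALUE() above, so
		 * wrapping IS_ERR() in unlikely() again buys nothing. */
		return IS_ERR_VALUE((unsigned long)ptr);
	}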

> +static struct request *nvme_alloc_request_qid(struct request_queue *q,
>  		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
>  {
>  	struct request *req;
>  
> +	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
> +			qid ? qid - 1 : 0);
>  	if (IS_ERR(req))
>  		return req;
>  
>  	nvme_init_req_from_cmd(req, cmd);
>  	return req;

Same here.

>  }
> -EXPORT_SYMBOL_GPL(nvme_alloc_request);

I think nvme_alloc_request_qid needs to be exported as well.
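
A minimal sketch of what that would look like, assuming the helper is
also made non-static so callers outside core.c can reach it:

	/* in drivers/nvme/host/core.c, mirroring the _qid_any export */
	EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);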

FYI, this also doesn't apply to the current nvme-5.10 tree any more.
Chaitanya Kulkarni Nov. 4, 2020, 9:03 p.m. UTC | #2
On 11/3/20 10:24, Christoph Hellwig wrote:
> On Wed, Oct 21, 2020 at 06:02:30PM -0700, Chaitanya Kulkarni wrote:
>> +static inline unsigned int nvme_req_op(struct nvme_command *cmd)
>> +{
>> +	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
>> +}
> Why is this added here while nvme_init_req_from_cmd is added in a prep
> patch?  I'm actually fine either way, but doing it differently for the
> different helpers is a little inconsistent.

I'll move this into the first prep patch.

>> +
>> +struct request *nvme_alloc_request_qid_any(struct request_queue *q,
>> +		struct nvme_command *cmd, blk_mq_req_flags_t flags)
> I'd call this just nvme_alloc_request to keep the short name for the
> normal use case.
Okay.
>
>> +	struct request *req;
>> +
>> +	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
>> +	if (unlikely(IS_ERR(req)))
>> +		return req;
>> +
>> +	nvme_init_req_from_cmd(req, cmd);
>> +	return req;
> Could be simplified to:
>
> 	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
> 	if (!IS_ERR(req))
> 		nvme_init_req_from_cmd(req, cmd);
> 	return req;
>
> Note that IS_ERR already contains an embedded unlikely().
Sure.
>> +static struct request *nvme_alloc_request_qid(struct request_queue *q,
>>  		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
>>  {
>>  	struct request *req;
>>  
>> +	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
>> +			qid ? qid - 1 : 0);
>>  	if (IS_ERR(req))
>>  		return req;
>>  
>>  	nvme_init_req_from_cmd(req, cmd);
>>  	return req;
> Same here.
Will do.
>>  }
>> -EXPORT_SYMBOL_GPL(nvme_alloc_request);
> I think nvme_alloc_request_qid needs to be exported as well.
>
> FYI, this also doesn't apply to the current nvme-5.10 tree any more.
>
Since it conflicts with the timeout series, I will rebase and resend once
we get the timeout series in; otherwise it makes reviews confusing and
stale at times.

Patch

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5bc52594fe63..87e56ef48f5d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -522,26 +522,38 @@  static inline void nvme_init_req_from_cmd(struct request *req,
 	nvme_req(req)->cmd = cmd;
 }
 
-struct request *nvme_alloc_request(struct request_queue *q,
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+{
+	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
+struct request *nvme_alloc_request_qid_any(struct request_queue *q,
+		struct nvme_command *cmd, blk_mq_req_flags_t flags)
+{
+	struct request *req;
+
+	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+	if (unlikely(IS_ERR(req)))
+		return req;
+
+	nvme_init_req_from_cmd(req, cmd);
+	return req;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_request_qid_any);
+
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
 {
-	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
 	struct request *req;
 
-	if (qid == NVME_QID_ANY) {
-		req = blk_mq_alloc_request(q, op, flags);
-	} else {
-		req = blk_mq_alloc_request_hctx(q, op, flags,
-				qid ? qid - 1 : 0);
-	}
+	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+			qid ? qid - 1 : 0);
 	if (IS_ERR(req))
 		return req;
 
 	nvme_init_req_from_cmd(req, cmd);
-
 	return req;
 }
-EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
@@ -899,7 +911,11 @@  int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	struct request *req;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, flags, qid);
+	if (qid == NVME_QID_ANY)
+		req = nvme_alloc_request_qid_any(q, cmd, flags);
+	else
+		req = nvme_alloc_request_qid(q, cmd, flags, qid);
+
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -1069,7 +1085,7 @@  static int nvme_submit_user_cmd(struct request_queue *q,
 	void *meta = NULL;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
+	req = nvme_alloc_request_qid_any(q, cmd, 0);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -1143,8 +1159,8 @@  static int nvme_keep_alive(struct nvme_ctrl *ctrl)
 {
 	struct request *rq;
 
-	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
-			NVME_QID_ANY);
+	rq = nvme_alloc_request_qid_any(ctrl->admin_q, &ctrl->ka_cmd,
+			BLK_MQ_REQ_RESERVED);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 8e562d0f2c30..b1ee1a0310f6 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -653,7 +653,7 @@  static struct request *nvme_nvm_alloc_request(struct request_queue *q,
 
 	nvme_nvm_rqtocmd(rqd, ns, cmd);
 
-	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+	rq = nvme_alloc_request_qid_any(q, (struct nvme_command *)cmd, 0);
 	if (IS_ERR(rq))
 		return rq;
 
@@ -767,8 +767,7 @@  static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	int ret = 0;
 
-	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
-			NVME_QID_ANY);
+	rq = nvme_alloc_request_qid_any(q, (struct nvme_command *)vcmd, 0);
 	if (IS_ERR(rq)) {
 		ret = -ENOMEM;
 		goto err_cmd;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cc111136a981..f39a0a387a51 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -608,8 +608,8 @@  int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
 void nvme_start_freeze(struct nvme_ctrl *ctrl);
 
 #define NVME_QID_ANY -1
-struct request *nvme_alloc_request(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+struct request *nvme_alloc_request_qid_any(struct request_queue *q,
+		struct nvme_command *cmd, blk_mq_req_flags_t flags);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index df8f3612107f..94f329b5f980 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1289,8 +1289,8 @@  static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		"I/O %d QID %d timeout, aborting\n",
 		 req->tag, nvmeq->qid);
 
-	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
-			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+	abort_req = nvme_alloc_request_qid_any(dev->ctrl.admin_q, &cmd,
+			BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(abort_req)) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
@@ -2204,7 +2204,7 @@  static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	cmd.delete_queue.opcode = opcode;
 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+	req = nvme_alloc_request_qid_any(q, &cmd, BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 56c571052216..76affbc3bd9a 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -236,7 +236,7 @@  static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		q = ns->queue;
 	}
 
-	rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+	rq = nvme_alloc_request_qid_any(q, req->cmd, BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(rq)) {
 		status = NVME_SC_INTERNAL;
 		goto out_put_ns;