
[PATCHv3 4/4] nvme: use return value from blk_execute_rq()

Message ID: 20210521202145.3674904-5-kbusch@kernel.org (mailing list archive)
State: New, archived
Series: block and nvme passthrough error handling

Commit Message

Keith Busch May 21, 2021, 8:21 p.m. UTC
We don't have an nvme status to report if the driver's .queue_rq()
returns an error without dispatching the requested nvme command. Check
the return value from blk_execute_rq() for all passthrough commands so
the caller may know their command was not successful.

If the command is from the target passthrough interface and fails to
dispatch, synthesize the response back to the host as an internal target
error.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
v2->v3:

  Initialize nvme status to 0

  Add helper function for blk_status_t to nvme passthrough return code

 drivers/nvme/host/core.c       | 33 +++++++++++++++++++++++++--------
 drivers/nvme/host/ioctl.c      |  6 +-----
 drivers/nvme/host/nvme.h       |  2 +-
 drivers/nvme/target/passthru.c |  8 ++++----
 4 files changed, 31 insertions(+), 18 deletions(-)
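
For illustration, a caller of the new interface would interpret the return
value roughly like this (a sketch following the convention documented in the
patch; it is not code from the series itself):

	int ret = nvme_execute_passthru_rq(rq);

	if (ret < 0) {
		/* kernel error; the command never produced a controller response */
	} else if (ret > 0) {
		/* NVMe CQE status code reported by the controller */
	} else {
		/* success */
	}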

Comments

Christoph Hellwig May 24, 2021, 8:04 a.m. UTC | #1
> +/*
> + * Return values:
> + * 0:  success
> + * >0: nvme controller's cqe status response
> + * <0: kernel error in lieu of controller response
> + */

For better reading flow I'd reformat this as:

/*
 * Return value:
 *   0:	success
 * > 0:	nvme controller's cqe status response
 * < 0: kernel error in lieu of controller response
 */

> +static int nvme_passthrough_status(struct request *rq, blk_status_t status)
> +{
> +	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
> +		return -EINTR;
> +	else if (nvme_req(rq)->status)
> +		return nvme_req(rq)->status;
> +	return blk_status_to_errno(status);
> +}

I find this a little odd being disconnected from the actual execute call.
What about a helper like this instead:

static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
		bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(disk, rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
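
With a helper shaped like that, the sync submission path could collapse to
something like the following (a sketch against the suggestion above, reusing
the req/ret/result names from the patch; not the final code):

	ret = nvme_execute_rq(NULL, req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;

so the NVME_REQ_CANCELLED/blk_status_t handling lives in one place and callers
only ever see the errno-or-CQE-status convention.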

> -	nvme_execute_passthru_rq(rq);
> +	status = nvme_execute_passthru_rq(rq);
>  
> -	status = nvme_req(rq)->status;
>  	if (status == NVME_SC_SUCCESS &&
>  	    req->cmd->common.opcode == nvme_admin_identify) {
>  		switch (req->cmd->identify.cns) {
> @@ -168,7 +167,8 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
>  			nvmet_passthru_override_id_ns(req);
>  			break;
>  		}
> -	}
> +	} else if (status < 0)
> +		status = NVME_SC_INTERNAL;

Don't we need a better translation here?
Keith Busch June 7, 2021, 4:58 p.m. UTC | #2
On Mon, May 24, 2021 at 10:04:28AM +0200, Christoph Hellwig wrote:
> > @@ -168,7 +167,8 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
> >  			nvmet_passthru_override_id_ns(req);
> >  			break;
> >  		}
> > -	}
> > +	} else if (status < 0)
> > +		status = NVME_SC_INTERNAL;
> 
> Don't we need a better translation here?

Did you have something in mind? I couldn't think of anything more
appropriate than the generic internal error. The errnos we get here are
-EINTR or -EIO. Both indicate we can't communicate with the back-end
device, but these problems are internal to the target from the host's
perspective.
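
Concretely, the collapse being described amounts to no more than this in the
target completion path (a sketch of what the hunk quoted above does; -EINTR
and -EIO both end up as the same generic status):

	/* no CQE status from the backend: report a generic internal error */
	if (status < 0)
		status = NVME_SC_INTERNAL;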
Christoph Hellwig June 8, 2021, 5:26 a.m. UTC | #3
On Tue, Jun 08, 2021 at 01:58:27AM +0900, Keith Busch wrote:
> On Mon, May 24, 2021 at 10:04:28AM +0200, Christoph Hellwig wrote:
> > > @@ -168,7 +167,8 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
> > >  			nvmet_passthru_override_id_ns(req);
> > >  			break;
> > >  		}
> > > -	}
> > > +	} else if (status < 0)
> > > +		status = NVME_SC_INTERNAL;
> > 
> > Don't we need a better translation here?
> 
> Did you have something in mind? I couldn't think of anything more
> appropriate than the generic internal error. The errnos we get here are
> -EINTR or -EIO. Both indicate we can't communicate with the back-end
> device, but these problems are internal to the target from the host's
> perspective.

Ok.

Patch

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1a73eed61eee..99e10476f377 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -589,6 +589,7 @@  EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
 
 static inline void nvme_clear_nvme_request(struct request *req)
 {
+	nvme_req(req)->status = 0;
 	nvme_req(req)->retries = 0;
 	nvme_req(req)->flags = 0;
 	req->rq_flags |= RQF_DONTPREP;
@@ -1012,6 +1013,21 @@  blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
 
+/*
+ * Return values:
+ * 0:  success
+ * >0: nvme controller's cqe status response
+ * <0: kernel error in lieu of controller response
+ */
+static int nvme_passthrough_status(struct request *rq, blk_status_t status)
+{
+	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+		return -EINTR;
+	else if (nvme_req(rq)->status)
+		return nvme_req(rq)->status;
+	return blk_status_to_errno(status);
+}
+
 /*
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
@@ -1022,6 +1038,7 @@  int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		blk_mq_req_flags_t flags)
 {
 	struct request *req;
+	blk_status_t status;
 	int ret;
 
 	if (qid == NVME_QID_ANY)
@@ -1040,13 +1057,10 @@  int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 			goto out;
 	}
 
-	blk_execute_rq(NULL, req, at_head);
-	if (result)
+	status = blk_execute_rq(NULL, req, at_head);
+	ret = nvme_passthrough_status(req, status);
+	if (result && ret >= 0)
 		*result = nvme_req(req)->result;
-	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
-		ret = -EINTR;
-	else
-		ret = nvme_req(req)->status;
  out:
 	blk_mq_free_request(req);
 	return ret;
@@ -1134,18 +1148,21 @@  static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
 	}
 }
 
-void nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq)
 {
 	struct nvme_command *cmd = nvme_req(rq)->cmd;
 	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
 	struct nvme_ns *ns = rq->q->queuedata;
 	struct gendisk *disk = ns ? ns->disk : NULL;
+	blk_status_t status;
 	u32 effects;
 
 	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
-	blk_execute_rq(disk, rq, 0);
+	status = blk_execute_rq(disk, rq, 0);
 	if (effects) /* nothing to be done for zero cmd effects */
 		nvme_passthru_end(ctrl, effects);
+
+	return nvme_passthrough_status(rq, status);
 }
 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
 
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 9557ead02de1..4e6bcc3c1c0d 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -93,11 +93,7 @@  static int nvme_submit_user_cmd(struct request_queue *q,
 		}
 	}
 
-	nvme_execute_passthru_rq(req);
-	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
-		ret = -EINTR;
-	else
-		ret = nvme_req(req)->status;
+	ret = nvme_execute_passthru_rq(req);
 	if (result)
 		*result = le64_to_cpu(nvme_req(req)->result.u64);
 	if (meta && !ret && !write) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 84bff995a308..9ea827871d67 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -877,7 +877,7 @@  static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			 u8 opcode);
-void nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
 void nvme_put_ns(struct nvme_ns *ns);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 39b1473f7204..ba28ce42cb55 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -153,11 +153,10 @@  static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 {
 	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
 	struct request *rq = req->p.rq;
-	u16 status;
+	int status;
 
-	nvme_execute_passthru_rq(rq);
+	status = nvme_execute_passthru_rq(rq);
 
-	status = nvme_req(rq)->status;
 	if (status == NVME_SC_SUCCESS &&
 	    req->cmd->common.opcode == nvme_admin_identify) {
 		switch (req->cmd->identify.cns) {
@@ -168,7 +167,8 @@  static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 			nvmet_passthru_override_id_ns(req);
 			break;
 		}
-	}
+	} else if (status < 0)
+		status = NVME_SC_INTERNAL;
 
 	req->cqe->result = nvme_req(rq)->result;
 	nvmet_req_complete(req, status);