--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -311,7 +311,7 @@ static inline enum nvme_disposition nvme
 	    nvme_req(req)->retries >= nvme_max_retries)
 		return COMPLETE;
 
-	if (req->cmd_flags & REQ_NVME_MPATH) {
+	if (req->cmd_flags & (REQ_NVME_MPATH | REQ_FAILFAST_TRANSPORT)) {
 		if (nvme_is_path_error(nvme_req(req)->status) ||
 		    blk_queue_dying(req->q))
 			return FAILOVER;
@@ -323,10 +323,8 @@ static inline enum nvme_disposition nvme
 	return RETRY;
 }
 
-static inline void nvme_end_req(struct request *req)
+static inline void __nvme_end_req(struct request *req, blk_status_t status)
 {
-	blk_status_t status = nvme_error_status(nvme_req(req)->status);
-
 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
 	    req_op(req) == REQ_OP_ZONE_APPEND)
 		req->__sector = nvme_lba_to_sect(req->q->queuedata,
@@ -336,6 +334,35 @@ static inline void nvme_end_req(struct r
 	blk_mq_end_request(req, status);
 }
 
+static inline void nvme_end_req(struct request *req)
+{
+	__nvme_end_req(req, nvme_error_status(nvme_req(req)->status));
+}
+
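+/*
+ * Complete a request that should fail over but lacks REQ_NVME_MPATH: here
+ * failover is handled by an upper layer such as dm-multipath, which sets
+ * REQ_FAILFAST_TRANSPORT on its requests.  Errors that do not map to a
+ * retryable path error are forced to BLK_STS_IOERR, unless DNR was set.
+ */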
+static inline void nvme_end_req_with_failover(struct request *req)
+{
+	u16 nvme_status = nvme_req(req)->status;
+	blk_status_t status = nvme_error_status(nvme_status);
+
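+	/* DNR means the controller does not want the command retried. */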
+	if (unlikely(nvme_status & NVME_SC_DNR))
+		goto out;
+
+	if (!blk_path_error(status)) {
+		pr_debug("Request meant for failover but blk_status_t (errno=%d) was not retryable.\n",
+			 blk_status_to_errno(status));
+		status = BLK_STS_IOERR;
+	}
+out:
+	__nvme_end_req(req, status);
+}
+
 void nvme_complete_rq(struct request *req)
 {
 	trace_nvme_complete_rq(req);
@@ -352,7 +379,14 @@ void nvme_complete_rq(struct request *re
 		nvme_retry_req(req);
 		return;
 	case FAILOVER:
-		nvme_failover_req(req);
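+		/*
+		 * Native multipath (REQ_NVME_MPATH) retries the request on
+		 * another path itself; otherwise report the path error up.
+		 */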
+		if (req->cmd_flags & REQ_NVME_MPATH)
+			nvme_failover_req(req);
+		else
+			nvme_end_req_with_failover(req);
 		return;
 	}
 }