@@ -44,9 +44,10 @@ static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-static u8 nvme_max_retries = 5;
+u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
+EXPORT_SYMBOL_GPL(nvme_max_retries);
static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
@@ -261,48 +262,6 @@ static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
nvme_put_ctrl(ctrl);
}
-static blk_status_t nvme_error_status(u16 status)
-{
- switch (status & 0x7ff) {
- case NVME_SC_SUCCESS:
- return BLK_STS_OK;
- case NVME_SC_CAP_EXCEEDED:
- return BLK_STS_NOSPC;
- case NVME_SC_LBA_RANGE:
- case NVME_SC_CMD_INTERRUPTED:
- case NVME_SC_NS_NOT_READY:
- return BLK_STS_TARGET;
- case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_ONCS_NOT_SUPPORTED:
- case NVME_SC_INVALID_OPCODE:
- case NVME_SC_INVALID_FIELD:
- case NVME_SC_INVALID_NS:
- return BLK_STS_NOTSUPP;
- case NVME_SC_WRITE_FAULT:
- case NVME_SC_READ_ERROR:
- case NVME_SC_UNWRITTEN_BLOCK:
- case NVME_SC_ACCESS_DENIED:
- case NVME_SC_READ_ONLY:
- case NVME_SC_COMPARE_FAILED:
- return BLK_STS_MEDIUM;
- case NVME_SC_GUARD_CHECK:
- case NVME_SC_APPTAG_CHECK:
- case NVME_SC_REFTAG_CHECK:
- case NVME_SC_INVALID_PI:
- return BLK_STS_PROTECTION;
- case NVME_SC_RESERVATION_CONFLICT:
- return BLK_STS_NEXUS;
- case NVME_SC_HOST_PATH_ERROR:
- return BLK_STS_TRANSPORT;
- case NVME_SC_ZONE_TOO_MANY_ACTIVE:
- return BLK_STS_ZONE_ACTIVE_RESOURCE;
- case NVME_SC_ZONE_TOO_MANY_OPEN:
- return BLK_STS_ZONE_OPEN_RESOURCE;
- default:
- return BLK_STS_IOERR;
- }
-}
-
static void nvme_retry_req(struct request *req)
{
unsigned long delay = 0;
@@ -318,34 +277,6 @@ static void nvme_retry_req(struct request *req)
blk_mq_delay_kick_requeue_list(req->q, delay);
}
-enum nvme_disposition {
- COMPLETE,
- RETRY,
- FAILOVER,
-};
-
-static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
-{
- if (likely(nvme_req(req)->status == 0))
- return COMPLETE;
-
- if (blk_noretry_request(req) ||
- (nvme_req(req)->status & NVME_SC_DNR) ||
- nvme_req(req)->retries >= nvme_max_retries)
- return COMPLETE;
-
- if (req->cmd_flags & REQ_NVME_MPATH) {
- if (nvme_is_path_error(nvme_req(req)->status) ||
- blk_queue_dying(req->q))
- return FAILOVER;
- } else {
- if (blk_queue_dying(req->q))
- return COMPLETE;
- }
-
- return RETRY;
-}
-
static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
@@ -903,4 +903,76 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
+static inline blk_status_t nvme_error_status(u16 status)
+{
+ switch (status & 0x7ff) {
+ case NVME_SC_SUCCESS:
+ return BLK_STS_OK;
+ case NVME_SC_CAP_EXCEEDED:
+ return BLK_STS_NOSPC;
+ case NVME_SC_LBA_RANGE:
+ case NVME_SC_CMD_INTERRUPTED:
+ case NVME_SC_NS_NOT_READY:
+ return BLK_STS_TARGET;
+ case NVME_SC_BAD_ATTRIBUTES:
+ case NVME_SC_ONCS_NOT_SUPPORTED:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
+ return BLK_STS_NOTSUPP;
+ case NVME_SC_WRITE_FAULT:
+ case NVME_SC_READ_ERROR:
+ case NVME_SC_UNWRITTEN_BLOCK:
+ case NVME_SC_ACCESS_DENIED:
+ case NVME_SC_READ_ONLY:
+ case NVME_SC_COMPARE_FAILED:
+ return BLK_STS_MEDIUM;
+ case NVME_SC_GUARD_CHECK:
+ case NVME_SC_APPTAG_CHECK:
+ case NVME_SC_REFTAG_CHECK:
+ case NVME_SC_INVALID_PI:
+ return BLK_STS_PROTECTION;
+ case NVME_SC_RESERVATION_CONFLICT:
+ return BLK_STS_NEXUS;
+ case NVME_SC_HOST_PATH_ERROR:
+ return BLK_STS_TRANSPORT;
+ case NVME_SC_ZONE_TOO_MANY_ACTIVE:
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+ case NVME_SC_ZONE_TOO_MANY_OPEN:
+ return BLK_STS_ZONE_OPEN_RESOURCE;
+ default:
+ return BLK_STS_IOERR;
+ }
+}
+
+enum nvme_disposition {
+ COMPLETE,
+ RETRY,
+ FAILOVER,
+};
+
+extern u8 nvme_max_retries;
+
+static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
+{
+ if (likely(nvme_req(req)->status == 0))
+ return COMPLETE;
+
+ if (blk_noretry_request(req) ||
+ (nvme_req(req)->status & NVME_SC_DNR) ||
+ nvme_req(req)->retries >= nvme_max_retries)
+ return COMPLETE;
+
+ if (req->cmd_flags & REQ_NVME_MPATH) {
+ if (nvme_is_path_error(nvme_req(req)->status) ||
+ blk_queue_dying(req->q))
+ return FAILOVER;
+ } else {
+ if (blk_queue_dying(req->q))
+ return COMPLETE;
+ }
+
+ return RETRY;
+}
+
#endif /* _NVME_H */
These are called for every IO completion, move them inline in the nvme
private header rather than have them be a function call out of the PCI
part of the nvme drivers. We also need them for batched handling, hence
the patch also serves as a preparation for that.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/nvme/host/core.c | 73 ++--------------------------------------
 drivers/nvme/host/nvme.h | 72 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+), 71 deletions(-)