@@ -276,7 +276,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	 */
 	req->__data_len = nr_bytes;
 
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -324,7 +324,7 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd)
 {
-	int ret = 0;
+	int ret = BLK_MQ_RQ_QUEUE_OK;
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
 		memcpy(cmd, req->cmd, sizeof(*cmd));
@@ -328,7 +328,7 @@ static int nvme_init_iod(struct request *rq, unsigned size,
 		rq->retries = 0;
 		rq->cmd_flags |= REQ_DONTPREP;
 	}
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -598,17 +598,17 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	map_len = nvme_map_len(req);
 	ret = nvme_init_iod(req, map_len, dev);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
 	ret = nvme_setup_cmd(ns, req, &cmnd);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out;
 
 	if (req->nr_phys_segments)
 		ret = nvme_map_data(dev, req, map_len, &cmnd);
 
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out;
 
 	cmnd.common.command_id = req->tag;
@@ -1399,7 +1399,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
 	ret = nvme_setup_cmd(ns, rq, c);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
 	c->common.command_id = rq->tag;
@@ -168,7 +168,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	int ret;
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -178,7 +178,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		nvme_cleanup_cmd(req);
 		blk_mq_start_request(req);
 		nvme_loop_queue_response(&iod->req);
-		return 0;
+		return BLK_MQ_RQ_QUEUE_OK;
 	}
 
 	if (blk_rq_bytes(req)) {
@@ -197,7 +197,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(req);
 
 	schedule_work(&iod->work);
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
@@ -1801,7 +1801,7 @@ static inline int prep_to_mq(int ret)
 {
 	switch (ret) {
 	case BLKPREP_OK:
-		return 0;
+		return BLK_MQ_RQ_QUEUE_OK;
 	case BLKPREP_DEFER:
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	default:
@@ -1888,7 +1888,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	int reason;
 
 	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out;
 
 	ret = BLK_MQ_RQ_QUEUE_BUSY;
@@ -1905,7 +1905,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (!(req->cmd_flags & REQ_DONTPREP)) {
 		ret = prep_to_mq(scsi_mq_prep_fn(req));
-		if (ret)
+		if (ret != BLK_MQ_RQ_QUEUE_OK)
 			goto out_dec_host_busy;
 		req->cmd_flags |= REQ_DONTPREP;
 	} else {