diff --git a/drivers/block/loop.c b/drivers/block/loop.c
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
static void lo_complete_rq(struct request *rq)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct loop_cmd *cmd = blk_rq_to_pdu(rq);
if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
@@ -507,7 +507,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct loop_cmd *cmd = blk_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
/*
@@ -1645,7 +1645,7 @@ EXPORT_SYMBOL(loop_unregister_transfer);
static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct loop_cmd *cmd = blk_rq_to_pdu(bd->rq);
struct loop_device *lo = cmd->rq->q->queuedata;
blk_mq_start_request(bd->rq);
@@ -1700,7 +1700,7 @@ static void loop_queue_work(struct kthread_work *work)
static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct loop_cmd *cmd = blk_rq_to_pdu(rq);
cmd->rq = rq;
kthread_init_work(&cmd->work, loop_queue_work);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,7 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev)
static void mtip_init_cmd_header(struct request *rq)
{
struct driver_data *dd = rq->q->queuedata;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
/* Point the command headers at the command tables. */
@@ -202,7 +202,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
/* Internal cmd isn't submitted via .queue_rq */
mtip_init_cmd_header(rq);
- return blk_mq_rq_to_pdu(rq);
+ return blk_rq_to_pdu(rq);
}
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
@@ -210,7 +210,7 @@ static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
{
struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
- return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
+ return blk_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
}
/*
@@ -534,7 +534,7 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
static void mtip_complete_command(struct mtip_cmd *cmd, int status)
{
- struct request *req = blk_mq_rq_from_pdu(cmd);
+ struct request *req = blk_rq_from_pdu(cmd);
cmd->status = status;
blk_mq_complete_request(req);
@@ -1033,7 +1033,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
return -EFAULT;
}
- rq = blk_mq_rq_from_pdu(int_cmd);
+ rq = blk_rq_from_pdu(int_cmd);
rq->special = &icmd;
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -2731,7 +2731,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
static void mtip_softirq_done_fn(struct request *rq)
{
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
struct driver_data *dd = rq->q->queuedata;
/* Unmap the DMA scatter list entries */
@@ -2747,7 +2747,7 @@ static void mtip_softirq_done_fn(struct request *rq)
static void mtip_abort_cmd(struct request *req, void *data,
bool reserved)
{
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(req);
struct driver_data *dd = data;
dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
@@ -3569,7 +3569,7 @@ static inline bool is_se_active(struct driver_data *dd)
static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
struct driver_data *dd = hctx->queue->queuedata;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
unsigned int nents;
if (is_se_active(dd))
@@ -3613,7 +3613,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
struct driver_data *dd = hctx->queue->queuedata;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
return false;
@@ -3638,7 +3638,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
{
struct driver_data *dd = hctx->queue->queuedata;
struct mtip_int_cmd *icmd = rq->special;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
struct mtip_cmd_sg *command_sg;
if (mtip_commands_active(dd->port))
@@ -3696,7 +3696,7 @@ static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
struct driver_data *dd = set->driver_data;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
if (!cmd->command)
return;
@@ -3709,7 +3709,7 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct driver_data *dd = set->driver_data;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
&cmd->command_dma, GFP_KERNEL);
@@ -3728,7 +3728,7 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
struct driver_data *dd = req->q->queuedata;
if (reserved) {
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(req);
cmd->status = -ETIME;
return BLK_EH_HANDLED;
@@ -3959,7 +3959,7 @@ static int mtip_block_initialize(struct driver_data *dd)
static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd *cmd = blk_rq_to_pdu(rq);
cmd->status = -ENODEV;
blk_mq_complete_request(rq);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -248,7 +248,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
static void nbd_complete_rq(struct request *req)
{
- struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_cmd *cmd = blk_rq_to_pdu(req);
dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
cmd->status ? "failed" : "done");
@@ -281,7 +281,7 @@ static void sock_shutdown(struct nbd_device *nbd)
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
bool reserved)
{
- struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_cmd *cmd = blk_rq_to_pdu(req);
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
@@ -390,7 +390,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
- struct request *req = blk_mq_rq_from_pdu(cmd);
+ struct request *req = blk_rq_from_pdu(cmd);
struct nbd_config *config = nbd->config;
struct nbd_sock *nsock = config->socks[index];
int result;
@@ -574,7 +574,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
tag, req);
return ERR_PTR(-ENOENT);
}
- cmd = blk_mq_rq_to_pdu(req);
+ cmd = blk_rq_to_pdu(req);
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
@@ -640,7 +640,7 @@ static void recv_work(struct work_struct *work)
break;
}
- blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
+ blk_mq_complete_request(blk_rq_from_pdu(cmd));
}
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
@@ -654,7 +654,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
if (!blk_mq_request_started(req))
return;
- cmd = blk_mq_rq_to_pdu(req);
+ cmd = blk_rq_to_pdu(req);
cmd->status = -EIO;
blk_mq_complete_request(req);
}
@@ -725,7 +725,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
- struct request *req = blk_mq_rq_from_pdu(cmd);
+ struct request *req = blk_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
struct nbd_sock *nsock;
@@ -801,7 +801,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct nbd_cmd *cmd = blk_rq_to_pdu(bd->rq);
int ret;
/*
@@ -1410,7 +1410,7 @@ static void nbd_dbg_close(void)
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
- struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct nbd_cmd *cmd = blk_rq_to_pdu(rq);
cmd->nbd = set->driver_data;
return 0;
}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -269,7 +269,7 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
if (queue_mode == NULL_Q_MQ)
- end_cmd(blk_mq_rq_to_pdu(rq));
+ end_cmd(blk_rq_to_pdu(rq));
else
end_cmd(rq->special);
}
@@ -359,7 +359,7 @@ static void null_request_fn(struct request_queue *q)
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct nullb_cmd *cmd = blk_rq_to_pdu(bd->rq);
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4010,7 +4010,7 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
static void rbd_queue_workfn(struct work_struct *work)
{
- struct request *rq = blk_mq_rq_from_pdu(work);
+ struct request *rq = blk_rq_from_pdu(work);
struct rbd_device *rbd_dev = rq->q->queuedata;
struct rbd_img_request *img_request;
struct ceph_snap_context *snapc = NULL;
@@ -4156,7 +4156,7 @@ static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
- struct work_struct *work = blk_mq_rq_to_pdu(rq);
+ struct work_struct *work = blk_rq_to_pdu(rq);
queue_work(rbd_wq, work);
return BLK_MQ_RQ_QUEUE_OK;
@@ -4351,7 +4351,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
- struct work_struct *work = blk_mq_rq_to_pdu(rq);
+ struct work_struct *work = blk_rq_to_pdu(rq);
INIT_WORK(work, rbd_queue_workfn);
return 0;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -113,7 +113,7 @@ static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
static inline void virtblk_scsi_request_done(struct request *req)
{
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ struct virtblk_req *vbr = blk_rq_to_pdu(req);
struct virtio_blk *vblk = req->q->queuedata;
struct scsi_request *sreq = &vbr->sreq;
@@ -174,7 +174,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
static inline void virtblk_request_done(struct request *req)
{
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ struct virtblk_req *vbr = blk_rq_to_pdu(req);
switch (req_op(req)) {
case REQ_OP_SCSI_IN:
@@ -199,7 +199,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
- struct request *req = blk_mq_rq_from_pdu(vbr);
+ struct request *req = blk_rq_from_pdu(vbr);
blk_mq_complete_request(req);
req_done = true;
@@ -219,7 +219,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct request *req = bd->rq;
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ struct virtblk_req *vbr = blk_rq_to_pdu(req);
unsigned long flags;
unsigned int num;
int qid = hctx->queue_num;
@@ -307,7 +307,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
goto out;
blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
- err = virtblk_result(blk_mq_rq_to_pdu(req));
+ err = virtblk_result(blk_rq_to_pdu(req));
out:
blk_put_request(req);
return err;
@@ -576,7 +576,7 @@ static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct virtio_blk *vblk = set->driver_data;
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
+ struct virtblk_req *vbr = blk_rq_to_pdu(rq);
#ifdef CONFIG_VIRTIO_BLK_SCSI
vbr->sreq.sense = vbr->sense;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -121,7 +121,7 @@ struct blkif_req {
static inline struct blkif_req *blkif_req(struct request *rq)
{
- return blk_mq_rq_to_pdu(rq);
+ return blk_rq_to_pdu(rq);
}
static DEFINE_MUTEX(blkfront_mutex);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -743,7 +743,7 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
static void ide_initialize_rq(struct request *rq)
{
- struct ide_request *req = blk_mq_rq_to_pdu(rq);
+ struct ide_request *req = blk_rq_to_pdu(rq);
scsi_req_init(&req->sreq);
req->sreq.sense = req->sense;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -163,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
- return blk_mq_rq_to_pdu(rq);
+ return blk_rq_to_pdu(rq);
}
static void rq_end_stats(struct mapped_device *md, struct request *orig)
@@ -551,7 +551,7 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+ struct dm_rq_target_io *tio = blk_rq_to_pdu(rq);
/*
* Must initialize md member of tio, otherwise it won't
@@ -731,7 +731,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+ struct dm_rq_target_io *tio = blk_rq_to_pdu(rq);
struct mapped_device *md = tio->md;
struct dm_target *ti = md->immutable_target;
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -191,7 +191,7 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)
{
int ret, leb, offset, bytes_left, to_read;
u64 pos;
- struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct request *req = blk_rq_from_pdu(pdu);
struct ubiblock *dev = req->q->queuedata;
to_read = blk_rq_bytes(req);
@@ -299,7 +299,7 @@ static void ubiblock_do_work(struct work_struct *work)
{
int ret;
struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
- struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct request *req = blk_rq_from_pdu(pdu);
blk_mq_start_request(req);
@@ -321,7 +321,7 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct request *req = bd->rq;
struct ubiblock *dev = hctx->queue->queuedata;
- struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+ struct ubiblock_pdu *pdu = blk_rq_to_pdu(req);
switch (req_op(req)) {
case REQ_OP_READ:
@@ -338,7 +338,7 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+ struct ubiblock_pdu *pdu = blk_rq_to_pdu(req);
sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
INIT_WORK(&pdu->work, ubiblock_do_work);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1143,7 +1143,7 @@ static void __nvme_fc_final_op_cleanup(struct request *rq);
static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
memset(cmdiu, 0, sizeof(*cmdiu));
@@ -1171,7 +1171,7 @@ static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
return __nvme_fc_exit_request(set->driver_data, op);
}
@@ -1434,7 +1434,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
@@ -1445,7 +1445,7 @@ nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
struct nvme_fc_queue *queue = &ctrl->queues[0];
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
@@ -1770,7 +1770,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
int ret;
@@ -1986,7 +1986,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_fc_queue *queue = hctx->driver_data;
struct nvme_fc_ctrl *ctrl = queue->ctrl;
struct request *rq = bd->rq;
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
enum nvmefc_fcp_datadir io_dir;
@@ -2029,7 +2029,7 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
if (!req)
return 0;
- op = blk_mq_rq_to_pdu(req);
+ op = blk_rq_to_pdu(req);
if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
(ctrl->lport->ops->poll_queue))
@@ -2071,7 +2071,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
static void
__nvme_fc_final_op_cleanup(struct request *rq)
{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
atomic_set(&op->state, FCPOP_STATE_IDLE);
@@ -2088,7 +2088,7 @@ __nvme_fc_final_op_cleanup(struct request *rq)
static void
nvme_fc_complete_rq(struct request *rq)
{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
unsigned long flags;
bool completed = false;
@@ -2130,7 +2130,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
struct nvme_ctrl *nctrl = data;
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+ struct nvme_fc_fcp_op *op = blk_rq_to_pdu(req);
unsigned long flags;
int status;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -96,7 +96,7 @@ enum {
static inline struct nvme_request *nvme_req(struct request *req)
{
- return blk_mq_rq_to_pdu(req);
+ return blk_rq_to_pdu(req);
}
/* The below value is the specific amount of delay needed before checking
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -355,7 +355,7 @@ static int nvme_admin_init_request(struct blk_mq_tag_set *set,
unsigned int numa_node)
{
struct nvme_dev *dev = set->driver_data;
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
struct nvme_queue *nvmeq = dev->queues[0];
BUG_ON(!nvmeq);
@@ -381,7 +381,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx, unsigned int numa_node)
{
struct nvme_dev *dev = set->driver_data;
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
BUG_ON(!nvmeq);
@@ -423,13 +423,13 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
static __le64 **iod_list(struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}
static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
+ struct nvme_iod *iod = blk_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
unsigned int size = blk_rq_payload_bytes(rq);
@@ -451,7 +451,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
const int last_prp = dev->ctrl.page_size / 8 - 1;
int i;
__le64 **list = iod_list(req);
@@ -539,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sg;
@@ -619,7 +619,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
struct request_queue *q = req->q;
enum dma_data_direction dma_dir = rq_data_dir(req) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
@@ -668,7 +668,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
enum dma_data_direction dma_dir = rq_data_dir(req) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
@@ -746,7 +746,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
static void nvme_pci_complete_rq(struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
nvme_unmap_data(iod->nvmeq->dev, req);
nvme_complete_rq(req);
@@ -941,7 +941,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
static void abort_endio(struct request *req, int error)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
struct nvme_queue *nvmeq = iod->nvmeq;
dev_warn(nvmeq->dev->ctrl.device,
@@ -952,7 +952,7 @@ static void abort_endio(struct request *req, int error)
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_iod *iod = blk_rq_to_pdu(req);
struct nvme_queue *nvmeq = iod->nvmeq;
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -279,7 +279,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
{
struct nvme_rdma_ctrl *ctrl = data;
struct nvme_rdma_device *dev = ctrl->device;
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
int ret = 0;
if (!req->mr->need_inval)
@@ -304,7 +304,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
struct request *rq, unsigned int queue_idx)
{
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
struct nvme_rdma_device *dev = queue->device;
@@ -330,7 +330,7 @@ static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
struct request *rq, unsigned int queue_idx)
{
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
@@ -881,7 +881,7 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
@@ -990,7 +990,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
{
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
int count, ret;
@@ -1179,7 +1179,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
nvme_rdma_error_recovery(queue->ctrl);
return ret;
}
- req = blk_mq_rq_to_pdu(rq);
+ req = blk_rq_to_pdu(rq);
if (rq->tag == tag)
ret = 1;
@@ -1419,7 +1419,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
/* queue error recovery */
nvme_rdma_error_recovery(req->queue->ctrl);
@@ -1454,7 +1454,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_ns *ns = hctx->queue->queuedata;
struct nvme_rdma_queue *queue = hctx->driver_data;
struct request *rq = bd->rq;
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
struct nvme_rdma_qe *sqe = &req->sqe;
struct nvme_command *c = sqe->data;
bool flush = false;
@@ -1526,7 +1526,7 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static void nvme_rdma_complete_rq(struct request *rq)
{
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_request *req = blk_rq_to_pdu(rq);
nvme_rdma_unmap_data(req->queue, rq);
nvme_complete_rq(rq);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -90,7 +90,7 @@ static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
static void nvme_loop_complete_rq(struct request *req)
{
- struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_loop_iod *iod = blk_rq_to_pdu(req);
nvme_cleanup_cmd(req);
sg_free_table_chained(&iod->sg_table, true);
@@ -148,7 +148,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
- struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
+ struct nvme_loop_iod *iod = blk_rq_to_pdu(rq);
/* queue error recovery */
schedule_work(&iod->queue->ctrl->reset_work);
@@ -165,7 +165,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_ns *ns = hctx->queue->queuedata;
struct nvme_loop_queue *queue = hctx->driver_data;
struct request *req = bd->rq;
- struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_loop_iod *iod = blk_rq_to_pdu(req);
int ret;
ret = nvme_setup_cmd(ns, req, &iod->cmd);
@@ -234,7 +234,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
+ return nvme_loop_init_iod(set->driver_data, blk_rq_to_pdu(req),
hctx_idx + 1);
}
@@ -242,7 +242,7 @@ static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
+ return nvme_loop_init_iod(set->driver_data, blk_rq_to_pdu(req), 0);
}
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1136,7 +1136,7 @@ EXPORT_SYMBOL(scsi_init_io);
/* Called from inside blk_get_request() */
static void scsi_initialize_rq(struct request *rq)
{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(rq);
scsi_req_init(&cmd->req);
}
@@ -1319,7 +1319,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(req);
int ret;
ret = scsi_prep_state_check(sdev, req);
@@ -1851,7 +1851,7 @@ static inline int prep_to_mq(int ret)
static int scsi_mq_prep_fn(struct request *req)
{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
unsigned char *sense_buf = cmd->sense_buffer;
@@ -1897,7 +1897,7 @@ static int scsi_mq_prep_fn(struct request *req)
if (blk_bidi_rq(req)) {
struct request *next_rq = req->next_rq;
- struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
+ struct scsi_data_buffer *bidi_sdb = blk_rq_to_pdu(next_rq);
memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
bidi_sdb->table.sgl =
@@ -1924,7 +1924,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request_queue *q = req->q;
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(req);
int ret;
int reason;
@@ -2012,7 +2012,7 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct Scsi_Host *shost = set->driver_data;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(rq);
cmd->sense_buffer =
scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
@@ -2026,7 +2026,7 @@ static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
struct Scsi_Host *shost = set->driver_data;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(rq);
scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
@@ -2105,7 +2105,7 @@ EXPORT_SYMBOL_GPL(__scsi_init_queue);
static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
struct Scsi_Host *shost = q->rq_alloc_data;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(rq);
memset(cmd, 0, sizeof(*cmd));
@@ -2131,7 +2131,7 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
static void scsi_exit_rq(struct request_queue *q, struct request *rq)
{
struct Scsi_Host *shost = q->rq_alloc_data;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_rq_to_pdu(rq);
if (cmd->prot_sdb)
kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -265,19 +265,6 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
-/*
- * Driver command data is immediately after the request. So subtract request
- * size to get back to the original request, add request size to get the PDU.
- */
-static inline struct request *blk_mq_rq_from_pdu(void *pdu)
-{
- return pdu - sizeof(struct request);
-}
-static inline void *blk_mq_rq_to_pdu(struct request *rq)
-{
- return rq + 1;
-}
-
#define queue_for_each_hw_ctx(q, hctx, i) \
for ((i) = 0; (i) < (q)->nr_hw_queues && \
({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -258,6 +258,19 @@ static inline unsigned short req_get_ioprio(struct request *req)
return req->ioprio;
}
+/*
+ * Driver command data is immediately after the request. So subtract request
+ * size to get back to the original request, add request size to get the PDU.
+ */
+static inline struct request *blk_rq_from_pdu(void *pdu)
+{
+ return pdu - sizeof(struct request);
+}
+static inline void *blk_rq_to_pdu(struct request *rq)
+{
+ return rq + 1;
+}
+
#include <linux/elevator.h>
struct blk_queue_ctx;
diff --git a/include/linux/ide.h b/include/linux/ide.h
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -58,7 +58,7 @@ struct ide_request {
static inline struct ide_request *ide_req(struct request *rq)
{
- return blk_mq_rq_to_pdu(rq);
+ return blk_rq_to_pdu(rq);
}
static inline bool ata_misc_request(struct request *rq)
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
--- a/include/scsi/scsi_request.h
+++ b/include/scsi/scsi_request.h
@@ -18,7 +18,7 @@ struct scsi_request {
static inline struct scsi_request *scsi_req(struct request *rq)
{
- return blk_mq_rq_to_pdu(rq);
+ return blk_rq_to_pdu(rq);
}
static inline void scsi_req_free_cmd(struct scsi_request *req)
Commit 6d247d7f71d1 ("block: allow specifying size for extra command data")
added support for .cmd_size to blk-sq. Due to that patch the
blk_mq_rq_{to,from}_pdu() functions are also useful for single-queue block
drivers. Hence remove "_mq" from the name of these functions. This patch
does not change any functionality. Most of this patch has been generated
by running the following shell command:

sed -i 's/blk_mq_rq_to_pdu/blk_rq_to_pdu/g; s/blk_mq_rq_from_pdu/blk_rq_from_pdu/g' \
    $(git grep -lE 'blk_mq_rq_(to|from)_pdu')

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
---
 drivers/block/loop.c              |  8 ++++----
 drivers/block/mtip32xx/mtip32xx.c | 28 ++++++++++++++--------------
 drivers/block/nbd.c               | 18 +++++++++---------
 drivers/block/null_blk.c          |  4 ++--
 drivers/block/rbd.c               |  6 +++---
 drivers/block/virtio_blk.c        | 12 ++++++------
 drivers/block/xen-blkfront.c      |  2 +-
 drivers/ide/ide-probe.c           |  2 +-
 drivers/md/dm-rq.c                |  6 +++---
 drivers/mtd/ubi/block.c           |  8 ++++----
 drivers/nvme/host/fc.c            | 20 ++++++++++----------
 drivers/nvme/host/nvme.h          |  2 +-
 drivers/nvme/host/pci.c           | 22 +++++++++++-----------
 drivers/nvme/host/rdma.c          | 18 +++++++++---------
 drivers/nvme/target/loop.c        | 10 +++++-----
 drivers/scsi/scsi_lib.c           | 18 +++++++++---------
 include/linux/blk-mq.h            | 13 -------------
 include/linux/blkdev.h            | 13 +++++++++++++
 include/linux/ide.h               |  2 +-
 include/scsi/scsi_request.h       |  2 +-
 20 files changed, 107 insertions(+), 107 deletions(-)
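For review context, a minimal sketch of the convention these helpers encode
(hypothetical "foo" driver, not part of this patch): the per-request PDU is
allocated immediately after struct request, with its size declared through
blk_mq_tag_set.cmd_size, so both conversions are plain pointer arithmetic.

	/* Hypothetical driver code, shown only to illustrate the renamed helpers. */
	struct foo_cmd {
		int status;
	};

	static int foo_init_request(struct blk_mq_tag_set *set, struct request *rq,
				    unsigned int hctx_idx, unsigned int numa_node)
	{
		struct foo_cmd *cmd = blk_rq_to_pdu(rq);	/* rq + 1 */

		cmd->status = 0;
		return 0;
	}

	static void foo_complete(struct foo_cmd *cmd)
	{
		/* pdu - sizeof(struct request) recovers the owning request */
		blk_mq_complete_request(blk_rq_from_pdu(cmd));
	}

The corresponding tag set would declare .cmd_size = sizeof(struct foo_cmd);
that declaration is what makes the rq + 1 arithmetic valid, for blk-mq and,
since commit 6d247d7f71d1, for blk-sq as well.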