@@ -43,6 +43,7 @@
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -3212,6 +3213,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->tag_set = set;
+ if (q->mq_ops->poll)
+ q->poll_fn = blk_mq_poll;
+
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
if (set->nr_maps > HCTX_TYPE_POLL &&
set->map[HCTX_TYPE_POLL].nr_queues)
@@ -3856,7 +3860,8 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
if (current->plug)
blk_flush_plug_list(current->plug, false);

- hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+ hctx = queue_is_mq(q) ?
+ q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)] : NULL;

/*
* If we sleep, have the caller restart the poll loop to reset
@@ -3864,21 +3869,26 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
* caller is responsible for checking if the IO completed. If
* the IO isn't complete, we'll get called again and will go
* straight to the busy poll loop.
+ *
+ * Currently dm doesn't support hybrid polling.
*/
- if (blk_mq_poll_hybrid(q, hctx, cookie))
+ if (hctx && blk_mq_poll_hybrid(q, hctx, cookie))
return 1;

- hctx->poll_considered++;
+ if (hctx)
+ hctx->poll_considered++;

state = current->state;
do {
int ret;

- hctx->poll_invoked++;
+ if (hctx)
+ hctx->poll_invoked++;

- ret = q->mq_ops->poll(hctx);
+ ret = q->poll_fn(q, cookie);
if (ret > 0) {
- hctx->poll_success++;
+ if (hctx)
+ hctx->poll_success++;
__set_current_state(TASK_RUNNING);
return ret;
}
@@ -3898,6 +3908,14 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
}
EXPORT_SYMBOL_GPL(blk_poll);

+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+ return q->mq_ops->poll(hctx);
+}
+
unsigned int blk_mq_rq_cpu(struct request *rq)
{
return rq->mq_ctx->cpu;
@@ -288,6 +288,8 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;

+typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t);
+
struct bio_vec;

enum blk_eh_timer_return {
@@ -486,6 +488,7 @@ struct request_queue {
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+ poll_q_fn *poll_fn;

struct timer_list timeout;
struct work_struct timeout_work;
This is a prep patch for adding IO polling support to dm devices.

->poll_fn was introduced in commit ea435e1b9392 ("block: add a poll_fn callback to struct request_queue") to support polling on non-mq queues such as nvme multipath, but was removed in commit 529262d56dbe ("block: remove ->poll_fn"). Supporting IO polling on dm devices requires polling of non-mq devices again, so bring ->poll_fn back.

Commit c62b37d96b6e ("block: move ->make_request_fn to struct block_device_operations") moved such per-queue callbacks into struct block_device_operations in the gendisk, but ->poll_fn can't live there, since there is no way to fetch the corresponding gendisk from a request_queue.

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
---
 block/blk-mq.c         | 30 ++++++++++++++++++++++++------
 include/linux/blkdev.h |  3 +++
 2 files changed, 27 insertions(+), 6 deletions(-)
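For illustration, here is a minimal sketch of how a bio-based stacking driver could install the restored ->poll_fn. The names example_poll and example_setup_queue, and the use of ->queuedata to hold a single underlying queue, are assumptions made up for this example; they are not part of this patch (the real dm wiring lands later in the series), which uses ->queuedata for driver-private data instead.

#include <linux/blkdev.h>

/*
 * Hypothetical poll_fn for a stacking driver that forwards polling
 * to one underlying queue stashed in ->queuedata at init time. A
 * real driver such as dm would instead map the cookie to the right
 * underlying device(s).
 */
static int example_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct request_queue *under = q->queuedata;

	/*
	 * Non-mq queues get hctx == NULL in blk_poll(), so the hybrid
	 * polling and poll-statistics paths are skipped; just delegate
	 * to the lower queue's ->poll_fn.
	 */
	if (under && under->poll_fn)
		return under->poll_fn(under, cookie);
	return 0;
}

/* At queue initialization time: */
static void example_setup_queue(struct request_queue *q,
				struct request_queue *under)
{
	q->queuedata = under;
	q->poll_fn = example_poll;
	blk_queue_flag_set(QUEUE_FLAG_POLL, q);
}

With something like this in place, blk_poll() on the stacking queue computes hctx as NULL (queue_is_mq() is false), skips the hctx-only accounting, and busy-polls through q->poll_fn until the cookie's IO completes or the task needs to reschedule.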