
[1/2] block: add request polling helper

Message ID: 20230530172343.3250958-1-kbusch@meta.com
State: New
Series: [1/2] block: add request polling helper

Commit Message

Keith Busch May 30, 2023, 5:23 p.m. UTC
From: Keith Busch <kbusch@kernel.org>

This will be used by drivers that allocate polling requests. The
interface does not require a bio, and can skip the overhead associated
with bio polling.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 block/blk-mq.c         | 29 ++++++++++++++++++++++++++---
 include/linux/blk-mq.h |  2 ++
 2 files changed, 28 insertions(+), 3 deletions(-)
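
For illustration, a minimal sketch of how a driver that allocates its
own polled request might use the new helper. The end_io plumbing and
the assumption that the request was allocated with REQ_POLLED on a
queue with poll queues are illustrative, not part of this patch:

#include <linux/blk-mq.h>

struct example_poll_ctx {
	bool done;
};

static enum rq_end_io_ret example_end_io(struct request *rq,
					 blk_status_t status)
{
	struct example_poll_ctx *ctx = rq->end_io_data;

	WRITE_ONCE(ctx->done, true);
	return RQ_END_IO_NONE;
}

/*
 * Assumes @rq came from blk_mq_alloc_request() with REQ_POLLED set on
 * a queue that has poll queues; otherwise blk_rq_poll() returns 0.
 */
static void example_issue_and_poll(struct request *rq)
{
	struct example_poll_ctx ctx = { .done = false };

	rq->end_io = example_end_io;
	rq->end_io_data = &ctx;
	blk_execute_rq_nowait(rq, false);

	/* No bio is involved; poll the request's hctx directly. */
	while (!READ_ONCE(ctx.done))
		blk_rq_poll(rq, NULL, 0);
}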

Comments

Kanchan Joshi May 31, 2023, 9:45 a.m. UTC | #1
On Tue, May 30, 2023 at 10:23:42AM -0700, Keith Busch wrote:
>From: Keith Busch <kbusch@kernel.org>
>
>This will be used by drivers that allocate polling requests. The
>interface does not require a bio, and can skip the overhead associated
>with bio polling.
>
>Signed-off-by: Keith Busch <kbusch@kernel.org>

Looks good.
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Christoph Hellwig May 31, 2023, 1:01 p.m. UTC | #2
> +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,

It would be nice to fix the overly long line while you're at it.

> +		unsigned int flags)
> +{
> +	return blk_hctx_poll(q, blk_qc_to_hctx(q, cookie), iob, flags);
> +}

But looking at the two callers of blk_mq_poll, shouldn't one use
rq->mq_hctx to get the hctx anyway instead of doing repeated
blk_qc_to_hctx in the polling loop?  We could then just open code
blk_qc_to_hctx in the remaining one.

The rest looks good to me.
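
Concretely, the suggestion amounts to something like the sketch below,
assuming the existing blk_rq_poll_completion() loop in blk_execute_rq()
and that blk_qc_to_hctx() is a plain xa_load() on q->hctx_table:

/* The request-based poll loop can pass the hctx cached on the request. */
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/* ...leaving one cookie-based caller, with the lookup open-coded. */
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
		struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);

	return blk_hctx_poll(q, hctx, iob, flags);
}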
Sagi Grimberg June 5, 2023, 11:03 p.m. UTC | #3
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f6dad0886a2fa..3c12c476e3a5c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4740,10 +4740,9 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
-int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
-		unsigned int flags)
+static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			 struct io_comp_batch *iob, unsigned int flags)
 {
-	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
 	long state = get_current_state();
 	int ret;
 
@@ -4768,6 +4767,30 @@ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *
 	return 0;
 }
 
+int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
+		unsigned int flags)
+{
+	return blk_hctx_poll(q, blk_qc_to_hctx(q, cookie), iob, flags);
+}
+
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+		unsigned int poll_flags)
+{
+	struct request_queue *q = rq->q;
+	int ret;
+
+	if (!blk_rq_is_poll(rq))
+		return 0;
+	if (!percpu_ref_tryget(&q->q_usage_counter))
+		return 0;
+
+	ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
+	blk_queue_exit(q);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blk_rq_poll);
+
 unsigned int blk_mq_rq_cpu(struct request *rq)
 {
 	return rq->mq_ctx->cpu;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 06caacd77ed66..579818fa1f91d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -722,6 +722,8 @@ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
 void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
 
 void blk_mq_free_request(struct request *rq);
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+		unsigned int poll_flags);
 
 bool blk_mq_queue_inflight(struct request_queue *q);
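
For reference, patch 2/2 (not shown here) is expected to consume this
from the NVMe uring-passthrough poll path; a hedged sketch of such a
caller, with the pdu helper and field names as assumptions:

/*
 * Sketch only: an io_uring ->uring_cmd_iopoll() style hook polling the
 * passthrough request stashed at submission. nvme_uring_cmd_pdu() and
 * its ->req field are assumed names for the example.
 */
static int example_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				    struct io_comp_batch *iob,
				    unsigned int poll_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = READ_ONCE(pdu->req);

	if (!req)
		return 0;
	/*
	 * blk_rq_poll() itself rejects non-polled requests and takes a
	 * queue reference, so no extra checks are needed here.
	 */
	return blk_rq_poll(req, iob, poll_flags);
}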