@@ -868,8 +868,19 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
}
}
- if (!blk_queue_poll(q))
- bio->bi_opf &= ~REQ_HIPRI;
+ /*
+ * Various block parts want %current->io_context, so allocate it up
+ * front rather than dealing with lots of pain to allocate it only
+ * where needed. This may fail and the block layer knows how to live
+ * with it.
+ */
+ if (unlikely(!current->io_context))
+ create_task_io_context(current, GFP_ATOMIC, q->node);
+
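+	/*
+	 * Bio based polling needs a per-task poll context.  Create it here
+	 * for HIPRI bios so that blk_poll_prepare() below can keep REQ_HIPRI.
+	 */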
+ if ((bio->bi_opf & REQ_HIPRI) && blk_queue_support_bio_poll(q))
+ blk_create_io_poll_context(q);
+
+ blk_poll_prepare(q, bio);
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
@@ -908,15 +919,6 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
break;
}
- /*
- * Various block parts want %current->io_context, so allocate it up
- * front rather than dealing with lots of pain to allocate it only
- * where needed. This may fail and the block layer knows how to live
- * with it.
- */
- if (unlikely(!current->io_context))
- create_task_io_context(current, GFP_ATOMIC, q->node);
-
if (blk_throtl_bio(bio)) {
blkcg_bio_issue_init(bio);
return false;
@@ -19,6 +19,7 @@ static struct kmem_cache *iocontext_cachep;
static inline void free_io_context(struct io_context *ioc)
{
+ kfree(ioc->data);
kmem_cache_free(iocontext_cachep, ioc);
}
@@ -4,11 +4,14 @@
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/hrtimer.h>
+#include <linux/bio.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
+static int blk_bio_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
+
/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
@@ -165,6 +168,9 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
if (current->plug)
blk_flush_plug_list(current->plug, false);
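+	/* bio based queues have no hw queue context; poll via blk_bio_poll() */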
+ if (!queue_is_mq(q))
+ return blk_bio_poll(q, cookie, spin);
+
hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
/*
@@ -204,3 +210,48 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
return 0;
}
EXPORT_SYMBOL_GPL(blk_poll);
+
+/* bio based io polling */
+static int blk_bio_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+{
+	/*
+	 * Create the io poll context; its poll queue stores poll bios and
+	 * their cookies moved over from the submission queue.
+	 */
+ blk_create_io_poll_context(q);
+
+ return 0;
+}
+
+static inline unsigned int bio_grp_list_size(unsigned int nr_grps)
+{
+ return sizeof(struct bio_grp_list) + nr_grps *
+ sizeof(struct bio_grp_list_data);
+}
+
+static void bio_poll_ctx_init(struct blk_bio_poll_ctx *pc)
+{
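+	/*
+	 * sq and pq live in the same allocation as the context itself:
+	 * bio_poll_ctx_alloc() lays out the struct, then the submission
+	 * queue, then the poll queue.
+	 */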
+ pc->sq = (void *)pc + sizeof(*pc);
+ pc->sq->max_nr_grps = BLK_BIO_POLL_SQ_SZ;
+
+ pc->pq = (void *)pc->sq + bio_grp_list_size(BLK_BIO_POLL_SQ_SZ);
+ pc->pq->max_nr_grps = BLK_BIO_POLL_PQ_SZ;
+
+ spin_lock_init(&pc->sq_lock);
+ spin_lock_init(&pc->pq_lock);
+}
+
+void bio_poll_ctx_alloc(struct io_context *ioc)
+{
+ struct blk_bio_poll_ctx *pc;
+ unsigned int size = sizeof(*pc) +
+ bio_grp_list_size(BLK_BIO_POLL_SQ_SZ) +
+ bio_grp_list_size(BLK_BIO_POLL_PQ_SZ);
+
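+	/*
+	 * May be called from the submission path, so GFP_ATOMIC.  If the
+	 * allocation fails, blk_poll_prepare() clears REQ_HIPRI and the bio
+	 * completes without polling.
+	 */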
+	pc = kzalloc(size, GFP_ATOMIC);
+ if (pc) {
+ bio_poll_ctx_init(pc);
+ if (cmpxchg(&ioc->data, NULL, (void *)pc))
+ kfree(pc);
+ }
+}
@@ -352,4 +352,64 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);
+/* Group bios that share the same 'grp_data' into one list */
+struct bio_grp_list_data {
+ void *grp_data;
+
+ /* all bios in this list share same 'grp_data' */
+ struct bio_list list;
+};
+
+struct bio_grp_list {
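+	/* max_nr_grps is the capacity; nr_grps counts groups in use */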
+ unsigned int max_nr_grps, nr_grps;
+ struct bio_grp_list_data head[0];
+};
+
+struct blk_bio_poll_ctx {
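+	/* submission queue: poll bios grouped at submission time */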
+ spinlock_t sq_lock;
+ struct bio_grp_list *sq;
+
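+	/* poll queue: bios and cookies moved over from sq for polling */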
+ spinlock_t pq_lock;
+ struct bio_grp_list *pq;
+};
+
+#define BLK_BIO_POLL_SQ_SZ 16U
+#define BLK_BIO_POLL_PQ_SZ (BLK_BIO_POLL_SQ_SZ * 2)
+
+void bio_poll_ctx_alloc(struct io_context *ioc);
+
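+/* bio based polling: the queue advertises poll support but isn't blk-mq */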
+static inline bool blk_queue_support_bio_poll(struct request_queue *q)
+{
+ return !queue_is_mq(q) && blk_queue_poll(q);
+}
+
+static inline struct blk_bio_poll_ctx *blk_get_bio_poll_ctx(void)
+{
+ struct io_context *ioc = current->io_context;
+
+ return ioc ? ioc->data : NULL;
+}
+
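+/*
+ * Clear REQ_HIPRI if the bio can't be polled: the queue doesn't support
+ * polling, or bio based polling is used but the task has no poll context
+ * (for example because its allocation failed).
+ */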
+static inline void blk_poll_prepare(struct request_queue *q,
+ struct bio *bio)
+{
+ if (!(bio->bi_opf & REQ_HIPRI))
+ return;
+
+ if (!blk_queue_poll(q) || (!queue_is_mq(q) && !blk_get_bio_poll_ctx()))
+ bio->bi_opf &= ~REQ_HIPRI;
+}
+
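+/* ensure current has an io_context with a bio poll context attached */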
+static inline void blk_create_io_poll_context(struct request_queue *q)
+{
+ struct io_context *ioc;
+
+ if (unlikely(!current->io_context))
+ create_task_io_context(current, GFP_ATOMIC, q->node);
+
+ ioc = current->io_context;
+ if (unlikely(ioc && !ioc->data))
+ bio_poll_ctx_alloc(ioc);
+}
+
#endif /* BLK_INTERNAL_H */
@@ -110,6 +110,8 @@ struct io_context {
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
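+	/* bio based polling: the task's struct blk_bio_poll_ctx */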
+ void *data;
+
struct work_struct release_work;
};