
[v3,2/2] blk-mq: use BLK_MQ_GFP_FLAGS and memalloc_noio_save/restore instead

Message ID 20190917120910.24842-3-xiubli@redhat.com (mailing list archive)
State New, archived
Series blk-mq: Avoid memory reclaim when allocating

Commit Message

Xiubo Li Sept. 17, 2019, 12:09 p.m. UTC
From: Xiubo Li <xiubli@redhat.com>

There are at least six places using the same combination of GFP flags;
switch them to a single BLK_MQ_GFP_FLAGS macro to make the code cleaner.

The macro is based on GFP_KERNEL instead of GFP_NOIO, so wrap the
affected allocations in memalloc_noio_save()/memalloc_noio_restore().
The scoped NOIO mark covers the whole allocation path, including
allocations made deeper in the call chain, so reclaim still cannot
recurse into the I/O path.

Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq.c | 44 ++++++++++++++++++++++++++++++++------------
 1 file changed, 32 insertions(+), 12 deletions(-)
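
Illustration (not part of the patch): the pattern applied at each call
site looks roughly like the sketch below. struct foo and alloc_foo_noio()
are made up for this example; memalloc_noio_save()/memalloc_noio_restore(),
kzalloc_node() and the GFP flags are the real kernel APIs.

#include <linux/sched/mm.h>
#include <linux/slab.h>

#define BLK_MQ_GFP_FLAGS (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)

struct foo {
	int id;
};

/*
 * Hypothetical helper: every allocation made between
 * memalloc_noio_save() and memalloc_noio_restore() is implicitly
 * stripped of __GFP_IO, so the GFP_KERNEL-based flags behave as if
 * GFP_NOIO had been passed, for the whole scope including nested
 * allocations in callees.
 */
static struct foo *alloc_foo_noio(int node)
{
	unsigned int noio_flag;
	struct foo *f;

	noio_flag = memalloc_noio_save();
	f = kzalloc_node(sizeof(*f), BLK_MQ_GFP_FLAGS, node);
	memalloc_noio_restore(noio_flag);

	return f;
}

The scoped memalloc_noio_*() API is generally preferred over passing
GFP_NOIO directly because the restriction also applies to allocations
performed by callees that take no gfp argument.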

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9c52e4dfe132..8cdc747d5c4d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -20,6 +20,7 @@ 
 #include <linux/list_sort.h>
 #include <linux/cpu.h>
 #include <linux/cache.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/topology.h>
 #include <linux/sched/signal.h>
@@ -39,6 +40,8 @@ 
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
+#define BLK_MQ_GFP_FLAGS (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
@@ -2083,35 +2086,38 @@  struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 					unsigned int reserved_tags)
 {
 	struct blk_mq_tags *tags;
+	unsigned int noio_flag;
 	int node;
 
 	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
+	noio_flag = memalloc_noio_save();
 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags),
-				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+				BLK_MQ_GFP_FLAGS);
 	if (!tags)
-		return NULL;
+		goto out;
 
 	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
-				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
-				 node);
+				 BLK_MQ_GFP_FLAGS, node);
 	if (!tags->rqs) {
 		blk_mq_free_tags(tags);
-		return NULL;
+		tags = NULL;
+		goto out;
 	}
 
 	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
-					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
-					node);
+					BLK_MQ_GFP_FLAGS, node);
 	if (!tags->static_rqs) {
 		kfree(tags->rqs);
 		blk_mq_free_tags(tags);
-		return NULL;
+		tags = NULL;
 	}
 
+out:
+	memalloc_noio_restore(noio_flag);
 	return tags;
 }
 
@@ -2158,6 +2164,7 @@  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 
 	for (i = 0; i < depth; ) {
 		int this_order = max_order;
+		unsigned int noio_flag;
 		struct page *page;
 		int to_do;
 		void *p;
@@ -2165,9 +2172,10 @@  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		while (this_order && left < order_to_size(this_order - 1))
 			this_order--;
 
+		noio_flag = memalloc_noio_save();
 		do {
 			page = alloc_pages_node(node,
-				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+				BLK_MQ_GFP_FLAGS | __GFP_ZERO,
 				this_order);
 			if (page)
 				break;
@@ -2176,6 +2184,7 @@  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 			if (order_to_size(this_order) < rq_size)
 				break;
 		} while (1);
+		memalloc_noio_restore(noio_flag);
 
 		if (!page)
 			goto fail;
@@ -2188,7 +2197,10 @@  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		 * Allow kmemleak to scan these pages as they contain pointers
 		 * to additional allocations like via ops->init_request().
 		 */
-		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
+		noio_flag = memalloc_noio_save();
+		kmemleak_alloc(p, order_to_size(this_order), 1,
+			       BLK_MQ_GFP_FLAGS);
+		memalloc_noio_restore(noio_flag);
 		entries_per_page = order_to_size(this_order) / rq_size;
 		to_do = min(entries_per_page, depth - i);
 		left -= to_do * rq_size;
@@ -2333,8 +2345,10 @@  blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 		int node)
 {
 	struct blk_mq_hw_ctx *hctx;
-	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
+	gfp_t gfp = BLK_MQ_GFP_FLAGS;
+	unsigned int noio_flag;
 
+	noio_flag = memalloc_noio_save();
 	hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
 	if (!hctx)
 		goto fail_alloc_hctx;
@@ -2378,6 +2392,8 @@  blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	if (!hctx->fq)
 		goto free_bitmap;
 
+	memalloc_noio_restore(noio_flag);
+
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		init_srcu_struct(hctx->srcu);
 	blk_mq_hctx_kobj_init(hctx);
@@ -2393,6 +2409,7 @@  blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
  free_hctx:
 	kfree(hctx);
  fail_alloc_hctx:
+	memalloc_noio_restore(noio_flag);
 	return NULL;
 }
 
@@ -3190,11 +3207,14 @@  static bool blk_mq_elv_switch_none(struct list_head *head,
 		struct request_queue *q)
 {
 	struct blk_mq_qe_pair *qe;
+	unsigned int noio_flag;
 
 	if (!q->elevator)
 		return true;
 
-	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+	noio_flag = memalloc_noio_save();
+	qe = kmalloc(sizeof(*qe), BLK_MQ_GFP_FLAGS);
+	memalloc_noio_restore(noio_flag);
 	if (!qe)
 		return false;