[1/3] block: move io_context creation into where it's needed

Message ID: 20211123171058.346084-2-axboe@kernel.dk
State: New, archived
Series: Misc block cleanups

Commit Message

Jens Axboe Nov. 23, 2021, 5:10 p.m. UTC
The only user of the io_context for IO is BFQ, yet the checking and
creation logic for it sits in the normal IO path and runs for every
bio.

Move the creation into blk_mq_sched_assign_ioc(), and have BFQ use that
helper.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/bfq-iosched.c  | 2 ++
 block/blk-core.c     | 9 ---------
 block/blk-ioc.c      | 1 +
 block/blk-mq-sched.c | 5 +++++
 block/blk-mq.c       | 3 ---
 5 files changed, 8 insertions(+), 12 deletions(-)
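
For orientation, the helper ends up looking roughly like this once the
hunks below are applied. This is a sketch: the lines between the two
blk-mq-sched.c hunks (the icq lookup and creation) are filled in from
the kernel source of this era and are not part of the diff itself.

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/* create task io_context, if we don't have one already */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);

Note that create_task_io_context() may fail under GFP_ATOMIC; the
existing NULL check on current->io_context right below then doubles as
the failure path, mirroring how submit_bio_checks() tolerated the
failure before this change.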

Comments

Christoph Hellwig Nov. 23, 2021, 6:46 p.m. UTC | #1
On Tue, Nov 23, 2021 at 10:10:56AM -0700, Jens Axboe wrote:
> --- a/block/blk-ioc.c
> +++ b/block/blk-ioc.c
> @@ -286,6 +286,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
>  
>  	return ret;
>  }
> +EXPORT_SYMBOL_GPL(create_task_io_context);

No need to export this now.

Jens Axboe Nov. 23, 2021, 6:58 p.m. UTC | #2
On 11/23/21 11:46 AM, Christoph Hellwig wrote:
> On Tue, Nov 23, 2021 at 10:10:56AM -0700, Jens Axboe wrote:
>> --- a/block/blk-ioc.c
>> +++ b/block/blk-ioc.c
>> @@ -286,6 +286,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
>>  
>>  	return ret;
>>  }
>> +EXPORT_SYMBOL_GPL(create_task_io_context);
> 
> No need to export this now.

Indeed, killed.
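
(With the export dropped, create_task_io_context() has no modular
callers left: its only remaining users live in blk-ioc.c and
blk-mq-sched.c, which are built into the core block layer, while
modular BFQ goes through the newly exported blk_mq_sched_assign_ioc()
instead. That is presumably why the export was unnecessary.)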

Patch

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index fec18118dc30..1ce1a99a7160 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6573,6 +6573,8 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  */
 static void bfq_prepare_request(struct request *rq)
 {
+	blk_mq_sched_assign_ioc(rq);
+
 	/*
 	 * Regardless of whether we have an icq attached, we have to
 	 * clear the scheduler pointers, as they might point to
diff --git a/block/blk-core.c b/block/blk-core.c
index 6443f2dfe43e..6ae8297b033f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -750,15 +750,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
 		break;
 	}
 
-	/*
-	 * Various block parts want %current->io_context, so allocate it up
-	 * front rather than dealing with lots of pain to allocate it only
-	 * where needed. This may fail and the block layer knows how to live
-	 * with it.
-	 */
-	if (unlikely(!current->io_context))
-		create_task_io_context(current, GFP_ATOMIC, q->node);
-
 	if (blk_throtl_bio(bio))
 		return false;
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 57299f860d41..736e0280d76f 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -286,6 +286,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(create_task_io_context);
 
 /**
  * get_task_io_context - get io_context of a task
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ba21449439cc..b942b38000e5 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -24,6 +24,10 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	struct io_context *ioc;
 	struct io_cq *icq;
 
+	/* create task io_context, if we don't have one already */
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, GFP_ATOMIC, q->node);
+
 	/*
 	 * May not have an IO context if it's a passthrough request
 	 */
@@ -43,6 +47,7 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);
 
 /*
  * Mark a hardware queue as needing a restart. For shared queues, maintain
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c00b22590cc..20a6445f6a01 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -406,9 +406,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 
 		if (!op_is_flush(data->cmd_flags) &&
 		    e->type->ops.prepare_request) {
-			if (e->type->icq_cache)
-				blk_mq_sched_assign_ioc(rq);
-
 			e->type->ops.prepare_request(rq);
 			rq->rq_flags |= RQF_ELVPRIV;
 		}
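
Taken together, the end state is that BFQ's prepare_request hook now
drives the whole icq setup. A sketch of the resulting
bfq_prepare_request(), reconstructed from the first hunk plus the
surrounding source of this era:

static void bfq_prepare_request(struct request *rq)
{
	blk_mq_sched_assign_ioc(rq);

	/*
	 * Regardless of whether we have an icq attached, we have to
	 * clear the scheduler pointers, as they might point to
	 * previously allocated bic/bfqq structs.
	 */
	rq->elv.priv[0] = rq->elv.priv[1] = NULL;
}

With the blk-mq.c hunk, blk_mq_rq_ctx_init() no longer needs the
e->type->icq_cache check at all: schedulers that don't use io_contexts
(mq-deadline, kyber) skip the work entirely, and only BFQ pays for the
io_context and icq setup.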