diff mbox series

[RFC,v2,01/11] fs/buffer: move async batch read code into a helper

Message ID 20241214031050.1337920-2-mcgrof@kernel.org (mailing list archive)
State New
Headers show
Series enable bs > ps for block devices | expand

Commit Message

Luis Chamberlain Dec. 14, 2024, 3:10 a.m. UTC
Move the code from block_read_full_folio() which does a batch of async
reads into a helper.

No functional changes.

Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
---
 fs/buffer.c | 73 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 43 insertions(+), 30 deletions(-)

Comments

Hannes Reinecke Dec. 17, 2024, 9:56 a.m. UTC | #1
On 12/14/24 04:10, Luis Chamberlain wrote:
> Move the code from block_read_full_folio() which does a batch of async
> reads into a helper.
> 
> No functional changes.
> 
> Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
> ---
>   fs/buffer.c | 73 +++++++++++++++++++++++++++++++----------------------
>   1 file changed, 43 insertions(+), 30 deletions(-)
> 
> diff --git a/fs/buffer.c b/fs/buffer.c
> index cc8452f60251..580451337efa 100644
> --- a/fs/buffer.c
> +++ b/fs/buffer.c
> @@ -2350,6 +2350,48 @@ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
>   }
>   EXPORT_SYMBOL(block_is_partially_uptodate);
>   
> +static void bh_read_batch_async(struct folio *folio,
> +				int nr, struct buffer_head *arr[],
> +				bool fully_mapped, bool no_reads,
> +				bool any_get_block_error)
> +{
> +	int i;
> +	struct buffer_head *bh;
> +
> +	if (fully_mapped)
> +		folio_set_mappedtodisk(folio);
> +
> +	if (no_reads) {
> +		/*
> +		 * All buffers are uptodate or get_block() returned an
> +		 * error when trying to map them *all* buffers we can
> +		 * finish the read.
> +		 */
> +		folio_end_read(folio, !any_get_block_error);
> +		return;
> +	}
> +
> +	/* Stage one: lock the buffers */

Now you messed up documentation:
Originally this was 'stage two', so now we have two 'stage one'
comments.
Please use the original documentation convention and add a note
to the helper that it's contingent on the 'stage 1' in the
calling function.

> +	for (i = 0; i < nr; i++) {
> +		bh = arr[i];
> +		lock_buffer(bh);
> +		mark_buffer_async_read(bh);
> +	}
> +
> +	/*
> +	 * Stage 2: start the IO.  Check for uptodateness
> +	 * inside the buffer lock in case another process reading
> +	 * the underlying blockdev brought it uptodate (the sct fix).
> +	 */
Same here; should be 'stage 3' to be consistent.

> +	for (i = 0; i < nr; i++) {
> +		bh = arr[i];
> +		if (buffer_uptodate(bh))
> +			end_buffer_async_read(bh, 1);
> +		else
> +			submit_bh(REQ_OP_READ, bh);
> +	}
> +}
> +
>   /*
>    * Generic "read_folio" function for block devices that have the normal
>    * get_block functionality. This is most of the block device filesystems.
> @@ -2414,37 +2456,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
>   		arr[nr++] = bh;
>   	} while (i++, iblock++, (bh = bh->b_this_page) != head);
>   
> -	if (fully_mapped)
> -		folio_set_mappedtodisk(folio);
> -
> -	if (!nr) {
> -		/*
> -		 * All buffers are uptodate or get_block() returned an
> -		 * error when trying to map them - we can finish the read.
> -		 */
> -		folio_end_read(folio, !page_error);
> -		return 0;
> -	}
> -
> -	/* Stage two: lock the buffers */
> -	for (i = 0; i < nr; i++) {
> -		bh = arr[i];
> -		lock_buffer(bh);
> -		mark_buffer_async_read(bh);
> -	}
> +	bh_read_batch_async(folio, nr, arr, fully_mapped, nr == 0, page_error);
>   
> -	/*
> -	 * Stage 3: start the IO.  Check for uptodateness
> -	 * inside the buffer lock in case another process reading
> -	 * the underlying blockdev brought it uptodate (the sct fix).
> -	 */
> -	for (i = 0; i < nr; i++) {
> -		bh = arr[i];
> -		if (buffer_uptodate(bh))
> -			end_buffer_async_read(bh, 1);
> -		else
> -			submit_bh(REQ_OP_READ, bh);
> -	}
>   	return 0;
>   }
>   EXPORT_SYMBOL(block_read_full_folio);

Otherwise looks good.

Cheers,

Hannes
diff mbox series

Patch

diff --git a/fs/buffer.c b/fs/buffer.c
index cc8452f60251..580451337efa 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2350,6 +2350,48 @@  bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 }
 EXPORT_SYMBOL(block_is_partially_uptodate);
 
+static void bh_read_batch_async(struct folio *folio,
+				int nr, struct buffer_head *arr[],
+				bool fully_mapped, bool no_reads,
+				bool any_get_block_error)
+{
+	int i;
+	struct buffer_head *bh;
+
+	if (fully_mapped)
+		folio_set_mappedtodisk(folio);
+
+	if (no_reads) {
+	/*
+	 * All buffers are uptodate or get_block() returned an
+	 * error when trying to map them - we can finish the
+	 * read.
+	 */
+		folio_end_read(folio, !any_get_block_error);
+		return;
+	}
+
+	/* Stage two: lock the buffers (stage one is in the caller) */
+	for (i = 0; i < nr; i++) {
+		bh = arr[i];
+		lock_buffer(bh);
+		mark_buffer_async_read(bh);
+	}
+
+	/*
+	 * Stage 3: start the IO.  Check for uptodateness
+	 * inside the buffer lock in case another process reading
+	 * the underlying blockdev brought it uptodate (the sct fix).
+	 */
+	for (i = 0; i < nr; i++) {
+		bh = arr[i];
+		if (buffer_uptodate(bh))
+			end_buffer_async_read(bh, 1);
+		else
+			submit_bh(REQ_OP_READ, bh);
+	}
+}
+
 /*
  * Generic "read_folio" function for block devices that have the normal
  * get_block functionality. This is most of the block device filesystems.
@@ -2414,37 +2456,8 @@  int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 		arr[nr++] = bh;
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
-	if (fully_mapped)
-		folio_set_mappedtodisk(folio);
-
-	if (!nr) {
-		/*
-		 * All buffers are uptodate or get_block() returned an
-		 * error when trying to map them - we can finish the read.
-		 */
-		folio_end_read(folio, !page_error);
-		return 0;
-	}
-
-	/* Stage two: lock the buffers */
-	for (i = 0; i < nr; i++) {
-		bh = arr[i];
-		lock_buffer(bh);
-		mark_buffer_async_read(bh);
-	}
+	bh_read_batch_async(folio, nr, arr, fully_mapped, nr == 0, page_error);
 
-	/*
-	 * Stage 3: start the IO.  Check for uptodateness
-	 * inside the buffer lock in case another process reading
-	 * the underlying blockdev brought it uptodate (the sct fix).
-	 */
-	for (i = 0; i < nr; i++) {
-		bh = arr[i];
-		if (buffer_uptodate(bh))
-			end_buffer_async_read(bh, 1);
-		else
-			submit_bh(REQ_OP_READ, bh);
-	}
 	return 0;
 }
 EXPORT_SYMBOL(block_read_full_folio);