
[v2,2/8] fs/buffer: remove batching from async read

Message ID 20250204231209.429356-3-mcgrof@kernel.org (mailing list archive)
State New
Series enable bs > ps for block devices

Commit Message

Luis Chamberlain Feb. 4, 2025, 11:12 p.m. UTC
From: Matthew Wilcox <willy@infradead.org>

The current implementation of an async folio read in block_read_full_folio()
first batches all buffer-heads which need I/O by putting them on an on-stack
array of size MAX_BUF_PER_PAGE. After collecting them it locks the batched
buffer-heads and finally submits the pending reads. On systems with a large
page size, such as Hexagon with 256 KiB pages, this batching can lead to
stack growth warnings, so we want to avoid that.

Note the use of folio_end_read() in block_read_full_folio(): it is called
either when the folio is determined to be fully uptodate and no pending
read is needed, when an I/O error happened in get_block(), or when an
out-of-band read raced against the batching collection and made our
required reads uptodate.

We can simplify this logic considerably and remove the stack growth issue
of MAX_BUF_PER_PAGE by replacing the batched logic with one which only
issues I/O for the previous buffer-head. Since we always keep one
buffer-head (the current one) on the folio with the async flag set, this
prevents any premature calls to folio_end_read().

So we accomplish two things with this:

 o Avoid large stack arrays sized by MAX_BUF_PER_PAGE
 o Make the need for folio_end_read() explicit and easier to read

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
---
 fs/buffer.c | 51 +++++++++++++++++++++------------------------------
 1 file changed, 21 insertions(+), 30 deletions(-)
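
The resulting loop, distilled: a simplified sketch of the patched function,
omitting the get_block() mapping, hole handling and error paths shown in the
full diff below.

	struct buffer_head *bh = head, *prev = NULL;

	do {
		if (buffer_uptodate(bh))
			continue;
		/* ... get_block() mapping and hole handling elided ... */
		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			/* another reader brought it uptodate while we waited */
			unlock_buffer(bh);
			continue;
		}
		mark_buffer_async_read(bh);
		if (prev)
			/* safe to submit: the current bh is still outstanding */
			submit_bh(REQ_OP_READ, prev);
		prev = bh;
	} while (iblock++, (bh = bh->b_this_page) != head);

	if (prev)
		/* the held-back buffer: its completion may end the folio read */
		submit_bh(REQ_OP_READ, prev);
	else
		/* no read was needed (or get_block() failed): finish here */
		folio_end_read(folio, !page_error);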

Comments

Hannes Reinecke Feb. 5, 2025, 4:21 p.m. UTC | #1
On 2/5/25 00:12, Luis Chamberlain wrote:
> From: Matthew Wilcox <willy@infradead.org>
> 
> The current implementation of an async folio read in block_read_full_folio()
> first batches all buffer-heads which need I/O by putting them on an on-stack
> array of size MAX_BUF_PER_PAGE. After collecting them it locks the batched
> buffer-heads and finally submits the pending reads. On systems with a large
> page size, such as Hexagon with 256 KiB pages, this batching can lead to
> stack growth warnings, so we want to avoid that.
> 
> Note the use of folio_end_read() in block_read_full_folio(): it is called
> either when the folio is determined to be fully uptodate and no pending
> read is needed, when an I/O error happened in get_block(), or when an
> out-of-band read raced against the batching collection and made our
> required reads uptodate.
> 
> We can simplify this logic considerably and remove the stack growth issue
> of MAX_BUF_PER_PAGE by replacing the batched logic with one which only
> issues I/O for the previous buffer-head. Since we always keep one
> buffer-head (the current one) on the folio with the async flag set, this
> prevents any premature calls to folio_end_read().
> 
> So we accomplish two things with this:
> 
>   o Avoid large stack arrays sized by MAX_BUF_PER_PAGE
>   o Make the need for folio_end_read() explicit and easier to read
> 
> Suggested-by: Matthew Wilcox <willy@infradead.org>
> Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
> ---
>   fs/buffer.c | 51 +++++++++++++++++++++------------------------------
>   1 file changed, 21 insertions(+), 30 deletions(-)
> 
> diff --git a/fs/buffer.c b/fs/buffer.c
> index b99560e8a142..167fa3e33566 100644
> --- a/fs/buffer.c
> +++ b/fs/buffer.c
> @@ -2361,9 +2361,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
>   {
>   	struct inode *inode = folio->mapping->host;
>   	sector_t iblock, lblock;
> -	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
> +	struct buffer_head *bh, *head, *prev = NULL;
>   	size_t blocksize;
> -	int nr, i;
>   	int fully_mapped = 1;
>   	bool page_error = false;
>   	loff_t limit = i_size_read(inode);
> @@ -2380,7 +2379,6 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
>   	iblock = div_u64(folio_pos(folio), blocksize);
>   	lblock = div_u64(limit + blocksize - 1, blocksize);
>   	bh = head;
> -	nr = 0;
>   
>   	do {
>   		if (buffer_uptodate(bh))
> @@ -2410,40 +2408,33 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
>   			if (buffer_uptodate(bh))
>   				continue;
>   		}
> -		arr[nr++] = bh;
> +
> +		lock_buffer(bh);
> +		if (buffer_uptodate(bh)) {
> +			unlock_buffer(bh);
> +			continue;
> +		}
> +
> +		mark_buffer_async_read(bh);
> +		if (prev)
> +			submit_bh(REQ_OP_READ, prev);
> +		prev = bh;
>   	} while (iblock++, (bh = bh->b_this_page) != head);
>   
>   	if (fully_mapped)
>   		folio_set_mappedtodisk(folio);
>   
> -	if (!nr) {
> -		/*
> -		 * All buffers are uptodate or get_block() returned an
> -		 * error when trying to map them - we can finish the read.
> -		 */
> -		folio_end_read(folio, !page_error);
> -		return 0;
> -	}
> -
> -	/* Stage two: lock the buffers */
> -	for (i = 0; i < nr; i++) {
> -		bh = arr[i];
> -		lock_buffer(bh);
> -		mark_buffer_async_read(bh);
> -	}
> -
>   	/*
> -	 * Stage 3: start the IO.  Check for uptodateness
> -	 * inside the buffer lock in case another process reading
> -	 * the underlying blockdev brought it uptodate (the sct fix).
> +	 * All buffers are uptodate or get_block() returned an error
> +	 * when trying to map them - we must finish the read because
> +	 * end_buffer_async_read() will never be called on any buffer
> +	 * in this folio.
>   	 */
> -	for (i = 0; i < nr; i++) {
> -		bh = arr[i];
> -		if (buffer_uptodate(bh))
> -			end_buffer_async_read(bh, 1);
> -		else
> -			submit_bh(REQ_OP_READ, bh);
> -	}
> +	if (prev)
> +		submit_bh(REQ_OP_READ, prev);
> +	else
> +		folio_end_read(folio, !page_error);
> +
>   	return 0;
>   }
>   EXPORT_SYMBOL(block_read_full_folio);

Similar here; as we now removed batching (which technically could result
in I/O being completed while executing the various stages) there really
is nothing preventing us from using plugging here, no?

Cheers,

Hannes
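
For reference, the plugging Hannes suggests would wrap the submission path
roughly as in the hedged sketch below; it is not part of the patch above and,
per the follow-up below, is deferred to a later change:

	struct blk_plug plug;

	blk_start_plug(&plug);
	do {
		/* ... per-buffer-head work exactly as in the patch ... */
		mark_buffer_async_read(bh);
		if (prev)
			/* typically queued on the plug, not dispatched yet */
			submit_bh(REQ_OP_READ, prev);
		prev = bh;
	} while (iblock++, (bh = bh->b_this_page) != head);

	if (prev)
		submit_bh(REQ_OP_READ, prev);
	else
		folio_end_read(folio, !page_error);
	blk_finish_plug(&plug);	/* dispatch the batched reads in one go */
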
Hannes Reinecke Feb. 7, 2025, 7:08 a.m. UTC | #2
On 2/5/25 17:21, Hannes Reinecke wrote:
> On 2/5/25 00:12, Luis Chamberlain wrote:
>> [ full quote of the patch snipped; see #1 above ]
> 
> Similar here; as we now removed batching (which technically could result
> in I/O being completed while executing the various stages) there really
> is nothing preventing us from using plugging here, no?
> 
In light of the discussion on the previous patch we should move that
to a later point. So:

Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
Matthew Wilcox Feb. 17, 2025, 9:40 p.m. UTC | #3
On Tue, Feb 04, 2025 at 03:12:03PM -0800, Luis Chamberlain wrote:
> From: Matthew Wilcox <willy@infradead.org>

From: Matthew Wilcox (Oracle) <willy@infradead.org>

block_read_full_folio() currently puts all !uptodate buffers into
an array allocated on the stack, then iterates over it twice, first
locking the buffers and then submitting them for read.  We want to
remove this array because it occupies too much stack space on
configurations with a larger PAGE_SIZE (eg 512 bytes with 8 byte
pointers and a 64KiB PAGE_SIZE).

We cannot simply submit buffer heads as we find them as the completion
handler needs to be able to tell when all reads are finished, so it can
end the folio read.  So we keep one buffer in reserve (using the 'prev'
variable) until the end of the function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
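
For scale, assuming the usual definition MAX_BUF_PER_PAGE == PAGE_SIZE / 512
and 8-byte pointers: a 64 KiB page means a 128-entry array, i.e. 1 KiB of
stack, and a 256 KiB page, as on Hexagon, means 512 entries, i.e. 4 KiB of
stack.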

> diff --git a/fs/buffer.c b/fs/buffer.c
> index b99560e8a142..167fa3e33566 100644
> --- a/fs/buffer.c
> +++ b/fs/buffer.c
> @@ -2361,9 +2361,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
>  {
>  	struct inode *inode = folio->mapping->host;
>  	sector_t iblock, lblock;
> -	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
> +	struct buffer_head *bh, *head, *prev = NULL;
>  	size_t blocksize;
> -	int nr, i;
>  	int fully_mapped = 1;
>  	bool page_error = false;
>  	loff_t limit = i_size_read(inode);
> @@ -2380,7 +2379,6 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
>  	iblock = div_u64(folio_pos(folio), blocksize);
>  	lblock = div_u64(limit + blocksize - 1, blocksize);
>  	bh = head;
> -	nr = 0;
>  
>  	do {
>  		if (buffer_uptodate(bh))
> @@ -2410,40 +2408,33 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
>  			if (buffer_uptodate(bh))
>  				continue;
>  		}
> -		arr[nr++] = bh;
> +
> +		lock_buffer(bh);
> +		if (buffer_uptodate(bh)) {
> +			unlock_buffer(bh);
> +			continue;
> +		}
> +
> +		mark_buffer_async_read(bh);
> +		if (prev)
> +			submit_bh(REQ_OP_READ, prev);
> +		prev = bh;
>  	} while (iblock++, (bh = bh->b_this_page) != head);
>  
>  	if (fully_mapped)
>  		folio_set_mappedtodisk(folio);
>  
> -	if (!nr) {
> -		/*
> -		 * All buffers are uptodate or get_block() returned an
> -		 * error when trying to map them - we can finish the read.
> -		 */
> -		folio_end_read(folio, !page_error);
> -		return 0;
> -	}
> -
> -	/* Stage two: lock the buffers */
> -	for (i = 0; i < nr; i++) {
> -		bh = arr[i];
> -		lock_buffer(bh);
> -		mark_buffer_async_read(bh);
> -	}
> -
>  	/*
> -	 * Stage 3: start the IO.  Check for uptodateness
> -	 * inside the buffer lock in case another process reading
> -	 * the underlying blockdev brought it uptodate (the sct fix).
> +	 * All buffers are uptodate or get_block() returned an error
> +	 * when trying to map them - we must finish the read because
> +	 * end_buffer_async_read() will never be called on any buffer
> +	 * in this folio.
>  	 */
> -	for (i = 0; i < nr; i++) {
> -		bh = arr[i];
> -		if (buffer_uptodate(bh))
> -			end_buffer_async_read(bh, 1);
> -		else
> -			submit_bh(REQ_OP_READ, bh);
> -	}
> +	if (prev)
> +		submit_bh(REQ_OP_READ, prev);
> +	else
> +		folio_end_read(folio, !page_error);
> +
>  	return 0;
>  }
>  EXPORT_SYMBOL(block_read_full_folio);
> -- 
> 2.45.2
>

Patch

diff --git a/fs/buffer.c b/fs/buffer.c
index b99560e8a142..167fa3e33566 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2361,9 +2361,8 @@  int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 {
 	struct inode *inode = folio->mapping->host;
 	sector_t iblock, lblock;
-	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+	struct buffer_head *bh, *head, *prev = NULL;
 	size_t blocksize;
-	int nr, i;
 	int fully_mapped = 1;
 	bool page_error = false;
 	loff_t limit = i_size_read(inode);
@@ -2380,7 +2379,6 @@  int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 	iblock = div_u64(folio_pos(folio), blocksize);
 	lblock = div_u64(limit + blocksize - 1, blocksize);
 	bh = head;
-	nr = 0;
 
 	do {
 		if (buffer_uptodate(bh))
@@ -2410,40 +2408,33 @@  int block_read_full_folio(struct folio *folio, get_block_t *get_block)
 			if (buffer_uptodate(bh))
 				continue;
 		}
-		arr[nr++] = bh;
+
+		lock_buffer(bh);
+		if (buffer_uptodate(bh)) {
+			unlock_buffer(bh);
+			continue;
+		}
+
+		mark_buffer_async_read(bh);
+		if (prev)
+			submit_bh(REQ_OP_READ, prev);
+		prev = bh;
 	} while (iblock++, (bh = bh->b_this_page) != head);
 
 	if (fully_mapped)
 		folio_set_mappedtodisk(folio);
 
-	if (!nr) {
-		/*
-		 * All buffers are uptodate or get_block() returned an
-		 * error when trying to map them - we can finish the read.
-		 */
-		folio_end_read(folio, !page_error);
-		return 0;
-	}
-
-	/* Stage two: lock the buffers */
-	for (i = 0; i < nr; i++) {
-		bh = arr[i];
-		lock_buffer(bh);
-		mark_buffer_async_read(bh);
-	}
-
 	/*
-	 * Stage 3: start the IO.  Check for uptodateness
-	 * inside the buffer lock in case another process reading
-	 * the underlying blockdev brought it uptodate (the sct fix).
+	 * All buffers are uptodate or get_block() returned an error
+	 * when trying to map them - we must finish the read because
+	 * end_buffer_async_read() will never be called on any buffer
+	 * in this folio.
 	 */
-	for (i = 0; i < nr; i++) {
-		bh = arr[i];
-		if (buffer_uptodate(bh))
-			end_buffer_async_read(bh, 1);
-		else
-			submit_bh(REQ_OP_READ, bh);
-	}
+	if (prev)
+		submit_bh(REQ_OP_READ, prev);
+	else
+		folio_end_read(folio, !page_error);
+
 	return 0;
 }
 EXPORT_SYMBOL(block_read_full_folio);