
[v2,2/2] btrfs: simplify the page uptodate preparation for prepare_pages()

Message ID 51064f30d856c1529d47d70dfb4f3cad46a42187.1727736323.git.wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: small cleanups to buffered write path

Commit Message

Qu Wenruo Sept. 30, 2024, 10:47 p.m. UTC
Currently inside prepare_pages(), we handle the leading and trailing pages
differently, and skip the middle pages (if any).

This is to avoid reading pages which are fully covered by the dirty
range.

Refactor the code by moving all checks (alignment check, range check,
force read check) into prepare_uptodate_page().

So that prepare_pages() only needs to iterate all the pages
unconditionally.

And since we're here, also update prepare_uptodate_page() to use the
folio API rather than the old page API.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/file.c | 68 +++++++++++++++++++++++++++----------------------
 1 file changed, 37 insertions(+), 31 deletions(-)
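
For illustration only (not part of the patch or the thread): a minimal
standalone C sketch of the coverage check the commit message describes. The
helper name need_read() and the sample offsets are made up for this note;
clamp_start/clamp_end mirror the variables introduced by the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/*
 * Decide whether the folio spanning [folio_pos, folio_pos + folio_size)
 * must be read before a buffered write to [pos, pos + len).
 */
static bool need_read(uint64_t folio_pos, uint64_t folio_size,
                      uint64_t pos, uint64_t len)
{
        uint64_t folio_end = folio_pos + folio_size;
        uint64_t clamp_start = pos > folio_pos ? pos : folio_pos;
        uint64_t clamp_end = pos + len < folio_end ? pos + len : folio_end;

        /* Fully covered folio: both clamped boundaries are page aligned. */
        return !(IS_ALIGNED(clamp_start, PAGE_SIZE) &&
                 IS_ALIGNED(clamp_end, PAGE_SIZE));
}

int main(void)
{
        /* An 8 KiB write at offset 4 KiB fully covers the folio at 4 KiB. */
        printf("%d\n", need_read(4096, 4096, 4096, 8192)); /* 0: no read */
        /* A write starting mid-page leaves clamp_start unaligned. */
        printf("%d\n", need_read(4096, 4096, 4100, 8192)); /* 1: read needed */
        return 0;
}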

Comments

David Sterba Oct. 1, 2024, 3:30 p.m. UTC | #1
On Tue, Oct 01, 2024 at 08:17:11AM +0930, Qu Wenruo wrote:
> +	if (force_uptodate)
> +		goto read;
> +
> +	/* The dirty range fully cover the page, no need to read it out. */
> +	if (IS_ALIGNED(clamp_start, PAGE_SIZE) &&
> +	    IS_ALIGNED(clamp_end, PAGE_SIZE))
> +		return 0;
> +read:

I've commented under v1 and will repeat it here so it does not get lost,
this if and goto is not necessary here and can be a normal if () { ...  }
Qu Wenruo Oct. 1, 2024, 9:11 p.m. UTC | #2
On 2024/10/2 01:00, David Sterba wrote:
> On Tue, Oct 01, 2024 at 08:17:11AM +0930, Qu Wenruo wrote:
>> +	if (force_uptodate)
>> +		goto read;
>> +
>> +	/* The dirty range fully cover the page, no need to read it out. */
>> +	if (IS_ALIGNED(clamp_start, PAGE_SIZE) &&
>> +	    IS_ALIGNED(clamp_end, PAGE_SIZE))
>> +		return 0;
>> +read:
>
> I've commented under v1 and will repeat it here so it does not get lost,
> this if and goto is not necessary here and can be a normal if () { ...  }
>

This is not that simple.

If @force_uptodate is set we need to read the folio no matter what; meanwhile,
if the range is not aligned, we also need to read the folio.

And the @force_uptodate check should go before the alignment check, so
I'm wondering if there is any better alternative.

Thanks,
Qu
David Sterba Oct. 2, 2024, 1:04 p.m. UTC | #3
On Wed, Oct 02, 2024 at 06:41:58AM +0930, Qu Wenruo wrote:
> 
> 
> On 2024/10/2 01:00, David Sterba wrote:
> > On Tue, Oct 01, 2024 at 08:17:11AM +0930, Qu Wenruo wrote:
> >> +	if (force_uptodate)
> >> +		goto read;
> >> +
> >> +	/* The dirty range fully cover the page, no need to read it out. */
> >> +	if (IS_ALIGNED(clamp_start, PAGE_SIZE) &&
> >> +	    IS_ALIGNED(clamp_end, PAGE_SIZE))
> >> +		return 0;
> >> +read:
> >
> > I've commented under v1 and will repeat it here so it does not get lost,
> > this if and goto is not necessary here and can be a normal if () { ...  }
> >
> 
> This is not that simple.
> 
> If @force_uptodate is set we need to read the folio no matter what; meanwhile,
> if the range is not aligned, we also need to read the folio.
> 
> And the @force_uptodate check should go before the alignment check, so
> I'm wondering if there is any better alternative.

if (!force_uptodate) {
	/* The dirty range fully cover the page, no need to read it out. */
	if (IS_ALIGNED(clamp_start, PAGE_SIZE) &&
	    IS_ALIGNED(clamp_end, PAGE_SIZE))
		return 0;
}
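
For reference, a sketch (not a complete function; the read path is elided) of
how that suggestion would fold into the start of the new
prepare_uptodate_page(). The variable names and the clamp computation are
taken from the patch below:

static int prepare_uptodate_page(struct inode *inode, struct page *page,
                                 u64 pos, u64 len, bool force_uptodate)
{
        struct folio *folio = page_folio(page);
        u64 clamp_start = max_t(u64, pos, folio_pos(folio));
        u64 clamp_end = min_t(u64, pos + len,
                              folio_pos(folio) + folio_size(folio));

        /* Already uptodate, nothing to read. */
        if (folio_test_uptodate(folio))
                return 0;

        /* Only consider skipping the read when the caller does not force it. */
        if (!force_uptodate) {
                /* The dirty range fully covers the page, no need to read it out. */
                if (IS_ALIGNED(clamp_start, PAGE_SIZE) &&
                    IS_ALIGNED(clamp_end, PAGE_SIZE))
                        return 0;
        }

        /* ... read and re-check the folio via btrfs_read_folio(), as in the patch below ... */
}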

Patch

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9555a3485670..c5d52fcee0be 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -858,36 +858,46 @@  int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
  */
 static int prepare_uptodate_page(struct inode *inode,
 				 struct page *page, u64 pos,
-				 bool force_uptodate)
+				 u64 len, bool force_uptodate)
 {
 	struct folio *folio = page_folio(page);
+	u64 clamp_start = max_t(u64, pos, folio_pos(folio));
+	u64 clamp_end = min_t(u64, pos + len, folio_pos(folio) + folio_size(folio));
 	int ret = 0;
 
-	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
-	    !PageUptodate(page)) {
-		ret = btrfs_read_folio(NULL, folio);
-		if (ret)
-			return ret;
-		lock_page(page);
-		if (!PageUptodate(page)) {
-			unlock_page(page);
-			return -EIO;
-		}
+	if (folio_test_uptodate(folio))
+		return 0;
 
-		/*
-		 * Since btrfs_read_folio() will unlock the folio before it
-		 * returns, there is a window where btrfs_release_folio() can be
-		 * called to release the page.  Here we check both inode
-		 * mapping and PagePrivate() to make sure the page was not
-		 * released.
-		 *
-		 * The private flag check is essential for subpage as we need
-		 * to store extra bitmap using folio private.
-		 */
-		if (page->mapping != inode->i_mapping || !folio_test_private(folio)) {
-			unlock_page(page);
-			return -EAGAIN;
-		}
+	if (force_uptodate)
+		goto read;
+
+	/* The dirty range fully cover the page, no need to read it out. */
+	if (IS_ALIGNED(clamp_start, PAGE_SIZE) &&
+	    IS_ALIGNED(clamp_end, PAGE_SIZE))
+		return 0;
+read:
+	ret = btrfs_read_folio(NULL, folio);
+	if (ret)
+		return ret;
+	folio_lock(folio);
+	if (!folio_test_uptodate(folio)) {
+		folio_unlock(folio);
+		return -EIO;
+	}
+
+	/*
+	 * Since btrfs_read_folio() will unlock the folio before it
+	 * returns, there is a window where btrfs_release_folio() can be
+	 * called to release the page.  Here we check both inode
+	 * mapping and PagePrivate() to make sure the page was not
+	 * released.
+	 *
+	 * The private flag check is essential for subpage as we need
+	 * to store extra bitmap using folio private.
+	 */
+	if (page->mapping != inode->i_mapping || !folio_test_private(folio)) {
+		folio_unlock(folio);
+		return -EAGAIN;
 	}
 	return 0;
 }
@@ -949,12 +959,8 @@  static noinline int prepare_pages(struct inode *inode, struct page **pages,
 			goto fail;
 		}
 
-		if (i == 0)
-			ret = prepare_uptodate_page(inode, pages[i], pos,
-						    force_uptodate);
-		if (!ret && i == num_pages - 1)
-			ret = prepare_uptodate_page(inode, pages[i],
-						    pos + write_bytes, false);
+		ret = prepare_uptodate_page(inode, pages[i], pos, write_bytes,
+					    force_uptodate);
 		if (ret) {
 			put_page(pages[i]);
 			if (!nowait && ret == -EAGAIN) {