btrfs: enhance function extent_range_clear_dirty_for_io()

Message ID 31b95191f9f1c8aa600370b70a77d69ebfd30bd3.1716177342.git.wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: enhance function extent_range_clear_dirty_for_io()

Commit Message

Qu Wenruo May 20, 2024, 3:55 a.m. UTC
Enhance the function by:

- Moving it to inode.c
  As there is only one user inside compress_file_range(), there is no
  need to export it through extent_io.h.

- Adding extra error handling
  Previously we would BUG_ON() if we could not find a page inside the
  range. Now we downgrade it to an ASSERT(), as this really means a
  logic error since we should have all the pages locked already.

- Making it subpage compatible
  Although currently compression only happens on a full page basis even
  for the subpage routine, there is no harm in making it subpage
  compatible now.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 15 ---------------
 fs/btrfs/extent_io.h |  1 -
 fs/btrfs/inode.c     | 31 ++++++++++++++++++++++++++++++-
 3 files changed, 30 insertions(+), 17 deletions(-)

Comments

Filipe Manana May 20, 2024, 10:51 a.m. UTC | #1
On Mon, May 20, 2024 at 4:56 AM Qu Wenruo <wqu@suse.com> wrote:
>
> Enhance the function by:
>
> - Moving it to inode.c
>   As there is only one user inside compress_file_range(), there is no
>   need to export it through extent_io.h.
>
> - Adding extra error handling
>   Previously we would BUG_ON() if we could not find a page inside the
>   range. Now we downgrade it to an ASSERT(), as this really means a
>   logic error since we should have all the pages locked already.
>
> - Making it subpage compatible
>   Although currently compression only happens on a full page basis even
>   for the subpage routine, there is no harm in making it subpage
>   compatible now.

The changes seem ok and reasonable to me.

However, I think these are really 3 separate changes that should be in
3 different patches.
It makes it easier to review and to revert in case there's a need to do so.

So I would make the move to inode.c first, and then the other changes.
Or the move last in case we need to backport the other changes.

Some comments inline below.

>
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
>  fs/btrfs/extent_io.c | 15 ---------------
>  fs/btrfs/extent_io.h |  1 -
>  fs/btrfs/inode.c     | 31 ++++++++++++++++++++++++++++++-
>  3 files changed, 30 insertions(+), 17 deletions(-)
>
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index a8fc0fcfa69f..9a6f369945c6 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -164,21 +164,6 @@ void __cold extent_buffer_free_cachep(void)
>         kmem_cache_destroy(extent_buffer_cache);
>  }
>
> -void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
> -{
> -       unsigned long index = start >> PAGE_SHIFT;
> -       unsigned long end_index = end >> PAGE_SHIFT;
> -       struct page *page;
> -
> -       while (index <= end_index) {
> -               page = find_get_page(inode->i_mapping, index);
> -               BUG_ON(!page); /* Pages should be in the extent_io_tree */
> -               clear_page_dirty_for_io(page);
> -               put_page(page);
> -               index++;
> -       }
> -}
> -
>  static void process_one_page(struct btrfs_fs_info *fs_info,
>                              struct page *page, struct page *locked_page,
>                              unsigned long page_ops, u64 start, u64 end)
> diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
> index dca6b12769ec..7c2f1bbc6b67 100644
> --- a/fs/btrfs/extent_io.h
> +++ b/fs/btrfs/extent_io.h
> @@ -350,7 +350,6 @@ void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
>  void set_extent_buffer_dirty(struct extent_buffer *eb);
>  void set_extent_buffer_uptodate(struct extent_buffer *eb);
>  void clear_extent_buffer_uptodate(struct extent_buffer *eb);
> -void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
>  void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
>                                   struct page *locked_page,
>                                   struct extent_state **cached,
> diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
> index 000809e16aba..541a719284a9 100644
> --- a/fs/btrfs/inode.c
> +++ b/fs/btrfs/inode.c
> @@ -890,6 +890,32 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
>                 btrfs_add_inode_defrag(NULL, inode, small_write);
>  }
>
> +static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
> +{
> +       struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
> +       const u64 len = end + 1 - start;
> +       unsigned long end_index = end >> PAGE_SHIFT;
> +       bool missing_folio = false;
> +
> +       /* We should not have such a large range. */
> +       ASSERT(len < U32_MAX);
> +       for (unsigned long index = start >> PAGE_SHIFT;
> +            index <= end_index; index++) {
> +               struct folio *folio;
> +
> +               folio = filemap_get_folio(inode->i_mapping, index);
> +               if (IS_ERR(folio)) {
> +                       missing_folio = true;
> +                       continue;
> +               }
> +               btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
> +               folio_put(folio);
> +       }
> +       if (missing_folio)
> +               return -ENOENT;

Why not return the error from filemap_get_folio()? We could keep it
and then return it after finishing the loop.
Currently it can only return -ENOENT, according to the function's
comment, but it would be more future-proof to return whatever error
it gives us.
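
Something like this for example (only a rough sketch, not even compile
tested), keeping the last error we got from filemap_get_folio():

static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
        struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
        const u64 len = end + 1 - start;
        unsigned long end_index = end >> PAGE_SHIFT;
        int ret = 0;

        /* We should not have such a large range. */
        ASSERT(len < U32_MAX);
        for (unsigned long index = start >> PAGE_SHIFT;
             index <= end_index; index++) {
                struct folio *folio;

                folio = filemap_get_folio(inode->i_mapping, index);
                if (IS_ERR(folio)) {
                        /* Remember the error but keep processing the rest. */
                        ret = PTR_ERR(folio);
                        continue;
                }
                btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
                folio_put(folio);
        }
        return ret;
}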

Thanks.

> +       return 0;
> +}
> +
>  /*
>   * Work queue call back to started compression on a file and pages.
>   *
> @@ -931,7 +957,10 @@ static void compress_file_range(struct btrfs_work *work)
>          * Otherwise applications with the file mmap'd can wander in and change
>          * the page contents while we are compressing them.
>          */
> -       extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
> +       ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
> +
> +       /* We have locked all the involved pages, shouldn't hit a missing page. */
> +       ASSERT(ret == 0);
>
>         /*
>          * We need to save i_size before now because it could change in between
> --
> 2.45.1
>
>
Qu Wenruo May 20, 2024, 11:06 a.m. UTC | #2
On 2024/5/20 20:21, Filipe Manana wrote:
> On Mon, May 20, 2024 at 4:56 AM Qu Wenruo <wqu@suse.com> wrote:
[...]
>> - Making it subpage compatible
>>    Although currently compression only happens on a full page basis even
>>    for the subpage routine, there is no harm in making it subpage
>>    compatible now.
> 
> The changes seem ok and reasonable to me.
> 
> However I think these are really 3 separate changes that should be in
> 3 different patches.
> It makes it easier to review and to revert in case there's a need to do so.
> 
> So I would make the move to inode.c first, and then the other changes.
> Or the move last in case we need to backport the other changes.

Sure, that indeed sounds better.

[...]
>> +       if (missing_folio)
>> +               return -ENOENT;
> 
> Why not return the error from filemap_get_folio()? We could keep it
> and then return it after finishing the loop.
> Currently it can only return -ENOENT, according to the function's
> comment, but it would be more future-proof to return whatever error
> it gives us.

Sure, although we can only keep either the first or the last error.
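
E.g. keeping only the first error would look something like this inside
the loop (just a sketch, with ret initialized to 0 before the loop):

        folio = filemap_get_folio(inode->i_mapping, index);
        if (IS_ERR(folio)) {
                /* Only record the first error, keep clearing the rest. */
                if (!ret)
                        ret = PTR_ERR(folio);
                continue;
        }
        btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
        folio_put(folio);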

Thanks,
Qu

> 
> Thanks.
> 
>> +       return 0;
>> +}
>> +
>>   /*
>>    * Work queue call back to started compression on a file and pages.
>>    *
>> @@ -931,7 +957,10 @@ static void compress_file_range(struct btrfs_work *work)
>>           * Otherwise applications with the file mmap'd can wander in and change
>>           * the page contents while we are compressing them.
>>           */
>> -       extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
>> +       ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
>> +
>> +       /* We have locked all the involved pages, shouldn't hit a missing page. */
>> +       ASSERT(ret == 0);
>>
>>          /*
>>           * We need to save i_size before now because it could change in between
>> --
>> 2.45.1
>>
>>

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a8fc0fcfa69f..9a6f369945c6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -164,21 +164,6 @@ void __cold extent_buffer_free_cachep(void)
 	kmem_cache_destroy(extent_buffer_cache);
 }
 
-void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_SHIFT;
-	unsigned long end_index = end >> PAGE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(inode->i_mapping, index);
-		BUG_ON(!page); /* Pages should be in the extent_io_tree */
-		clear_page_dirty_for_io(page);
-		put_page(page);
-		index++;
-	}
-}
-
 static void process_one_page(struct btrfs_fs_info *fs_info,
 			     struct page *page, struct page *locked_page,
 			     unsigned long page_ops, u64 start, u64 end)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index dca6b12769ec..7c2f1bbc6b67 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -350,7 +350,6 @@ void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
 void set_extent_buffer_dirty(struct extent_buffer *eb);
 void set_extent_buffer_uptodate(struct extent_buffer *eb);
 void clear_extent_buffer_uptodate(struct extent_buffer *eb);
-void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 				  struct page *locked_page,
 				  struct extent_state **cached,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 000809e16aba..541a719284a9 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -890,6 +890,32 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
 		btrfs_add_inode_defrag(NULL, inode, small_write);
 }
 
+static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+	const u64 len = end + 1 - start;
+	unsigned long end_index = end >> PAGE_SHIFT;
+	bool missing_folio = false;
+
+	/* We should not have such a large range. */
+	ASSERT(len < U32_MAX);
+	for (unsigned long index = start >> PAGE_SHIFT;
+	     index <= end_index; index++) {
+		struct folio *folio;
+
+		folio = filemap_get_folio(inode->i_mapping, index);
+		if (IS_ERR(folio)) {
+			missing_folio = true;
+			continue;
+		}
+		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
+		folio_put(folio);
+	}
+	if (missing_folio)
+		return -ENOENT;
+	return 0;
+}
+
 /*
  * Work queue call back to started compression on a file and pages.
  *
@@ -931,7 +957,10 @@ static void compress_file_range(struct btrfs_work *work)
 	 * Otherwise applications with the file mmap'd can wander in and change
 	 * the page contents while we are compressing them.
 	 */
-	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+	ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+
+	/* We have locked all the involved pages, shouldn't hit a missing page. */
+	ASSERT(ret == 0);
 
 	/*
 	 * We need to save i_size before now because it could change in between