[11/13] iomap: factor out an iomap_writepage_map_blocks helper

Message ID 20231126124720.1249310-12-hch@lst.de (mailing list archive)
State New, archived
Series [01/13] iomap: clear the per-folio dirty bits on all writeback failures

Commit Message

Christoph Hellwig Nov. 26, 2023, 12:47 p.m. UTC
Split the loop body that calls into the file system to map a block and
add it to the ioend into a separate helper, to prepare for refactoring of
the surrounding code.

Note that this was the only place in iomap_writepage_map that could
return an error, so move the call to ->discard_folio into the new
helper, as that will help avoid code duplication in the future.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/iomap/buffered-io.c | 72 +++++++++++++++++++++++++-----------------
 1 file changed, 43 insertions(+), 29 deletions(-)
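
For readers who want the shape of the change without parsing the diff: a
loop body that can fail is hoisted into a helper, and the failure
notification (->discard_folio) moves along with it so future callers need
not duplicate it. Below is a minimal, self-contained C sketch of that
pattern; all names (fake_map_blocks, fake_discard, map_one_block) are
invented for illustration and are not kernel API.

	#include <stdio.h>

	/* Hypothetical stand-ins for wpc->ops->map_blocks and ->discard_folio. */
	static int fake_map_blocks(int block)
	{
		return block == 3 ? -5 : 0;	/* fail on one block, like an -EIO */
	}

	static void fake_discard(int block)
	{
		fprintf(stderr, "discard from block %d onward\n", block);
	}

	/*
	 * The hoisted loop body: map one block, and on failure notify the
	 * "file system" from inside the helper, mirroring how the patch moves
	 * the ->discard_folio call into iomap_writepage_map_blocks.
	 */
	static int map_one_block(int block, unsigned *count)
	{
		int error = fake_map_blocks(block);

		if (error) {
			fake_discard(block);
			return error;
		}
		(*count)++;	/* block was "added to the ioend" */
		return 0;
	}

	int main(void)
	{
		unsigned count = 0;
		int error = 0;
		int block;

		/* The caller's loop collapses to one call plus the error break. */
		for (block = 0; block < 8; block++) {
			error = map_one_block(block, &count);
			if (error)
				break;
		}
		printf("mapped %u block(s), error=%d\n", count, error);
		return 0;
	}

The payoff of this pattern, as the commit message notes, is that the
error-notification side effect now lives in exactly one place, so any
future caller of the helper gets it for free.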

Comments

Darrick J. Wong Nov. 29, 2023, 5:06 a.m. UTC | #1
On Sun, Nov 26, 2023 at 01:47:18PM +0100, Christoph Hellwig wrote:
> Split the loop body that calls into the file system to map a block and
> add it to the ioend into a separate helper, to prepare for refactoring of
> the surrounding code.
> 
> Note that this was the only place in iomap_writepage_map that could
> return an error, so move the call to ->discard_folio into the new
> helper, as that will help avoid code duplication in the future.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Simple enough hoist,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/iomap/buffered-io.c | 72 +++++++++++++++++++++++++-----------------
>  1 file changed, 43 insertions(+), 29 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index e1d5076251702d..9f223820f60d22 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -1723,6 +1723,45 @@ static void iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
>  	wbc_account_cgroup_owner(wbc, &folio->page, len);
>  }
>  
> +static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
> +		struct writeback_control *wbc, struct folio *folio,
> +		struct inode *inode, u64 pos, unsigned *count,
> +		struct list_head *submit_list)
> +{
> +	int error;
> +
> +	error = wpc->ops->map_blocks(wpc, inode, pos);
> +	if (error)
> +		goto fail;
> +	trace_iomap_writepage_map(inode, &wpc->iomap);
> +
> +	switch (wpc->iomap.type) {
> +	case IOMAP_INLINE:
> +		WARN_ON_ONCE(1);
> +		error = -EIO;
> +		break;
> +	case IOMAP_HOLE:
> +		break;
> +	default:
> +		iomap_add_to_ioend(wpc, wbc, folio, inode, pos, submit_list);
> +		(*count)++;
> +	}
> +
> +fail:
> +	/*
> +	 * We cannot cancel the ioend directly here on error.  We may have
> +	 * already set other pages under writeback and hence we have to run I/O
> +	 * completion to mark the error state of the pages under writeback
> +	 * appropriately.
> +	 *
> +	 * Just let the file system know what portion of the folio failed to
> +	 * map.
> +	 */
> +	if (error && wpc->ops->discard_folio)
> +		wpc->ops->discard_folio(folio, pos);
> +	return error;
> +}
> +
>  /*
>   * Check interaction of the folio with the file end.
>   *
> @@ -1807,7 +1846,8 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
>  	unsigned nblocks = i_blocks_per_folio(inode, folio);
>  	u64 pos = folio_pos(folio);
>  	u64 end_pos = pos + folio_size(folio);
> -	int error = 0, count = 0, i;
> +	unsigned count = 0;
> +	int error = 0, i;
>  	LIST_HEAD(submit_list);
>  
>  	trace_iomap_writepage(inode, pos, folio_size(folio));
> @@ -1833,19 +1873,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
>  	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
>  		if (ifs && !ifs_block_is_dirty(folio, ifs, i))
>  			continue;
> -
> -		error = wpc->ops->map_blocks(wpc, inode, pos);
> +		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode, pos,
> +				&count, &submit_list);
>  		if (error)
>  			break;
> -		trace_iomap_writepage_map(inode, &wpc->iomap);
> -		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) {
> -			error = -EIO;
> -			break;
> -		}
> -		if (wpc->iomap.type == IOMAP_HOLE)
> -			continue;
> -		iomap_add_to_ioend(wpc, wbc, folio, inode, pos, &submit_list);
> -		count++;
>  	}
>  	if (count)
>  		wpc->nr_folios++;
> @@ -1855,23 +1886,6 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
>  	WARN_ON_ONCE(folio_test_writeback(folio));
>  	WARN_ON_ONCE(folio_test_dirty(folio));
>  
> -	/*
> -	 * We cannot cancel the ioend directly here on error.  We may have
> -	 * already set other pages under writeback and hence we have to run I/O
> -	 * completion to mark the error state of the pages under writeback
> -	 * appropriately.
> -	 */
> -	if (unlikely(error)) {
> -		/*
> -		 * Let the filesystem know what portion of the current page
> -		 * failed to map. If the page hasn't been added to ioend, it
> -		 * won't be affected by I/O completion and we must unlock it
> -		 * now.
> -		 */
> -		if (wpc->ops->discard_folio)
> -			wpc->ops->discard_folio(folio, pos);
> -	}
> -
>  	/*
>  	 * We can have dirty bits set past end of file in page_mkwrite path
>  	 * while mapping the last partial folio. Hence it's better to clear
> -- 
> 2.39.2
> 
>
Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e1d5076251702d..9f223820f60d22 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1723,6 +1723,45 @@ static void iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
 	wbc_account_cgroup_owner(wbc, &folio->page, len);
 }
 
+static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
+		struct writeback_control *wbc, struct folio *folio,
+		struct inode *inode, u64 pos, unsigned *count,
+		struct list_head *submit_list)
+{
+	int error;
+
+	error = wpc->ops->map_blocks(wpc, inode, pos);
+	if (error)
+		goto fail;
+	trace_iomap_writepage_map(inode, &wpc->iomap);
+
+	switch (wpc->iomap.type) {
+	case IOMAP_INLINE:
+		WARN_ON_ONCE(1);
+		error = -EIO;
+		break;
+	case IOMAP_HOLE:
+		break;
+	default:
+		iomap_add_to_ioend(wpc, wbc, folio, inode, pos, submit_list);
+		(*count)++;
+	}
+
+fail:
+	/*
+	 * We cannot cancel the ioend directly here on error.  We may have
+	 * already set other pages under writeback and hence we have to run I/O
+	 * completion to mark the error state of the pages under writeback
+	 * appropriately.
+	 *
+	 * Just let the file system know what portion of the folio failed to
+	 * map.
+	 */
+	if (error && wpc->ops->discard_folio)
+		wpc->ops->discard_folio(folio, pos);
+	return error;
+}
+
 /*
  * Check interaction of the folio with the file end.
  *
@@ -1807,7 +1846,8 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	unsigned nblocks = i_blocks_per_folio(inode, folio);
 	u64 pos = folio_pos(folio);
 	u64 end_pos = pos + folio_size(folio);
-	int error = 0, count = 0, i;
+	unsigned count = 0;
+	int error = 0, i;
 	LIST_HEAD(submit_list);
 
 	trace_iomap_writepage(inode, pos, folio_size(folio));
@@ -1833,19 +1873,10 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
 		if (ifs && !ifs_block_is_dirty(folio, ifs, i))
 			continue;
-
-		error = wpc->ops->map_blocks(wpc, inode, pos);
+		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode, pos,
+				&count, &submit_list);
 		if (error)
 			break;
-		trace_iomap_writepage_map(inode, &wpc->iomap);
-		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) {
-			error = -EIO;
-			break;
-		}
-		if (wpc->iomap.type == IOMAP_HOLE)
-			continue;
-		iomap_add_to_ioend(wpc, wbc, folio, inode, pos, &submit_list);
-		count++;
 	}
 	if (count)
 		wpc->nr_folios++;
@@ -1855,23 +1886,6 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	WARN_ON_ONCE(folio_test_writeback(folio));
 	WARN_ON_ONCE(folio_test_dirty(folio));
 
-	/*
-	 * We cannot cancel the ioend directly here on error.  We may have
-	 * already set other pages under writeback and hence we have to run I/O
-	 * completion to mark the error state of the pages under writeback
-	 * appropriately.
-	 */
-	if (unlikely(error)) {
-		/*
-		 * Let the filesystem know what portion of the current page
-		 * failed to map. If the page hasn't been added to ioend, it
-		 * won't be affected by I/O completion and we must unlock it
-		 * now.
-		 */
-		if (wpc->ops->discard_folio)
-			wpc->ops->discard_folio(folio, pos);
-	}
-
 	/*
 	 * We can have dirty bits set past end of file in page_mkwrite path
 	 * while mapping the last partial folio. Hence it's better to clear