
[04/11] writeback: Simplify the loops in write_cache_pages()

Message ID 20231214132544.376574-5-hch@lst.de (mailing list archive)
State New
Series [01/11] writeback: Factor out writeback_finish()

Commit Message

Christoph Hellwig Dec. 14, 2023, 1:25 p.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Collapse the two nested loops into one.  This is needed as a step
towards turning this into an iterator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 mm/page-writeback.c | 98 ++++++++++++++++++++++-----------------------
 1 file changed, 49 insertions(+), 49 deletions(-)
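
For orientation, a minimal standalone sketch of the restructuring (illustrative only; get_batch(), BATCH and TOTAL are made-up stand-ins, none of this is kernel code): the nested "fetch a batch, then walk it" loops become one flat loop that refills the batch whenever the cursor catches up with the previous fill, which is the shape write_cache_pages() takes after this patch.

#include <stdio.h>

#define TOTAL 10	/* pretend number of dirty pages */
#define BATCH 4		/* pretend folio_batch capacity */

/* Pretend lookup: fills up to BATCH entries starting at *next. */
static int get_batch(int *next, int *batch)
{
	int n = 0;

	while (n < BATCH && *next < TOTAL)
		batch[n++] = (*next)++;
	return n;
}

int main(void)
{
	int batch[BATCH], next = 0, nr = 0, i = 0;

	for (;;) {
		if (i == nr) {		/* previous batch consumed: refill */
			nr = get_batch(&next, batch);
			i = 0;
		}
		if (nr == 0)		/* lookup found nothing: done */
			break;
		printf("process %d\n", batch[i++]);	/* per-folio work */
	}
	return 0;
}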

Comments

Jan Kara Dec. 15, 2023, 2:25 p.m. UTC | #1
On Thu 14-12-23 14:25:37, Christoph Hellwig wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Collapse the two nested loops into one.  This is needed as a step
> towards turning this into an iterator.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

It would be good to mention in the changelog that we drop the condition
index <= end and just rely on filemap_get_folios_tag() to return 0 entries
when index > end. This actually has a subtle implication when end == -1,
because then the returned index will be -1 as well, and thus if there is a
page present at index -1, we could loop indefinitely. But I think that's
mostly a theoretical concern, so I'd be fine with just mentioning this
subtlety in the changelog and possibly in a comment in the code.

								Honza
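
A tiny standalone sketch (plain C, illustrative only, not kernel code) of why end == -1 is special for an unsigned index: there is no index strictly greater than (pgoff_t)-1, so a "start has moved past end" style check can never fire, and stepping past the last index wraps back to 0.

#include <stdio.h>

int main(void)
{
	unsigned long end = -1UL;	/* wbc->end for range_cyclic writeback */
	unsigned long idx = end;	/* imagine a page tagged at the last index */

	printf("idx > end? %d\n", idx > end);	/* prints 0: never true for any idx */
	printf("idx + 1 = %lu\n", idx + 1);	/* wraps to 0 instead of passing end */
	return 0;
}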

Matthew Wilcox Dec. 16, 2023, 6:16 a.m. UTC | #2
On Thu, Dec 14, 2023 at 02:25:37PM +0100, Christoph Hellwig wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Collapse the two nested loops into one.  This is needed as a step
> towards turning this into an iterator.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

> Signed-off-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5a3df8665ff4f9..2087d16115710e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2460,6 +2460,7 @@  int write_cache_pages(struct address_space *mapping,
 		      void *data)
 {
 	int error;
+	int i = 0;
 
 	if (wbc->range_cyclic) {
 		wbc->index = mapping->writeback_index; /* prev offset */
@@ -2477,67 +2478,66 @@  int write_cache_pages(struct address_space *mapping,
 	folio_batch_init(&wbc->fbatch);
 	wbc->err = 0;
 
-	while (wbc->index <= wbc->end) {
-		int i;
-
-		writeback_get_batch(mapping, wbc);
+	for (;;) {
+		struct folio *folio;
+		unsigned long nr;
 
+		if (i == wbc->fbatch.nr) {
+			writeback_get_batch(mapping, wbc);
+			i = 0;
+		}
 		if (wbc->fbatch.nr == 0)
 			break;
 
-		for (i = 0; i < wbc->fbatch.nr; i++) {
-			struct folio *folio = wbc->fbatch.folios[i];
-			unsigned long nr;
+		folio = wbc->fbatch.folios[i++];
 
-			wbc->done_index = folio->index;
+		wbc->done_index = folio->index;
 
-			folio_lock(folio);
-			if (!should_writeback_folio(mapping, wbc, folio)) {
-				folio_unlock(folio);
-				continue;
-			}
+		folio_lock(folio);
+		if (!should_writeback_folio(mapping, wbc, folio)) {
+			folio_unlock(folio);
+			continue;
+		}
 
-			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-
-			error = writepage(folio, wbc, data);
-			nr = folio_nr_pages(folio);
-			if (unlikely(error)) {
-				/*
-				 * Handle errors according to the type of
-				 * writeback. There's no need to continue for
-				 * background writeback. Just push done_index
-				 * past this page so media errors won't choke
-				 * writeout for the entire file. For integrity
-				 * writeback, we must process the entire dirty
-				 * set regardless of errors because the fs may
-				 * still have state to clear for each page. In
-				 * that case we continue processing and return
-				 * the first error.
-				 */
-				if (error == AOP_WRITEPAGE_ACTIVATE) {
-					folio_unlock(folio);
-					error = 0;
-				} else if (wbc->sync_mode != WB_SYNC_ALL) {
-					wbc->err = error;
-					wbc->done_index = folio->index + nr;
-					return writeback_finish(mapping,
-							wbc, true);
-				}
-				if (!wbc->err)
-					wbc->err = error;
-			}
+		trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
 
+		error = writepage(folio, wbc, data);
+		nr = folio_nr_pages(folio);
+		if (unlikely(error)) {
 			/*
-			 * We stop writing back only if we are not doing
-			 * integrity sync. In case of integrity sync we have to
-			 * keep going until we have written all the pages
-			 * we tagged for writeback prior to entering this loop.
+			 * Handle errors according to the type of
+			 * writeback. There's no need to continue for
+			 * background writeback. Just push done_index
+			 * past this page so media errors won't choke
+			 * writeout for the entire file. For integrity
+			 * writeback, we must process the entire dirty
+			 * set regardless of errors because the fs may
+			 * still have state to clear for each page. In
+			 * that case we continue processing and return
+			 * the first error.
 			 */
-			wbc->nr_to_write -= nr;
-			if (wbc->nr_to_write <= 0 &&
-			    wbc->sync_mode == WB_SYNC_NONE)
+			if (error == AOP_WRITEPAGE_ACTIVATE) {
+				folio_unlock(folio);
+				error = 0;
+			} else if (wbc->sync_mode != WB_SYNC_ALL) {
+				wbc->err = error;
+				wbc->done_index = folio->index + nr;
 				return writeback_finish(mapping, wbc, true);
+			}
+			if (!wbc->err)
+				wbc->err = error;
 		}
+
+		/*
+		 * We stop writing back only if we are not doing
+		 * integrity sync. In case of integrity sync we have to
+		 * keep going until we have written all the pages
+		 * we tagged for writeback prior to entering this loop.
+		 */
+		wbc->nr_to_write -= nr;
+		if (wbc->nr_to_write <= 0 &&
+		    wbc->sync_mode == WB_SYNC_NONE)
+			return writeback_finish(mapping, wbc, true);
 	}
 
 	return writeback_finish(mapping, wbc, false);