Message ID | 20231214132544.376574-4-hch@lst.de (mailing list archive) |
---|---|
State | New, archived |
Series | [01/11] writeback: Factor out writeback_finish() |
On Thu 14-12-23 14:25:36, Christoph Hellwig wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> Reduce write_cache_pages() by about 30 lines; much of it is commentary,
> but it all bundles nicely into an obvious function.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

I like this! Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

                                                                Honza

> ---
>  mm/page-writeback.c | 59 ++++++++++++++++++++++++---------------------
>  1 file changed, 32 insertions(+), 27 deletions(-)
>
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index 5d33e7b468e2cc..5a3df8665ff4f9 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -2394,6 +2394,36 @@ static void writeback_get_batch(struct address_space *mapping,
>                          &wbc->fbatch);
>  }
>
> +static bool should_writeback_folio(struct address_space *mapping,
> +                struct writeback_control *wbc, struct folio *folio)
> +{
> +        /*
> +         * Folio truncated or invalidated. We can freely skip it then,
> +         * even for data integrity operations: the folio has disappeared
> +         * concurrently, so there could be no real expectation of this
> +         * data integrity operation even if there is now a new, dirty
> +         * folio at the same pagecache index.
> +         */
> +        if (unlikely(folio->mapping != mapping))
> +                return false;
> +
> +        /* Did somebody write it for us? */
> +        if (!folio_test_dirty(folio))
> +                return false;
> +
> +        if (folio_test_writeback(folio)) {
> +                if (wbc->sync_mode == WB_SYNC_NONE)
> +                        return false;
> +                folio_wait_writeback(folio);
> +        }
> +
> +        BUG_ON(folio_test_writeback(folio));
> +        if (!folio_clear_dirty_for_io(folio))
> +                return false;
> +
> +        return true;
> +}
> +
>  /**
>   * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
>   * @mapping:        address space structure to write
> @@ -2462,38 +2492,13 @@ int write_cache_pages(struct address_space *mapping,
>                  wbc->done_index = folio->index;
>
>                  folio_lock(folio);
> -
> -                /*
> -                 * Page truncated or invalidated. We can freely skip it
> -                 * then, even for data integrity operations: the page
> -                 * has disappeared concurrently, so there could be no
> -                 * real expectation of this data integrity operation
> -                 * even if there is now a new, dirty page at the same
> -                 * pagecache address.
> -                 */
> -                if (unlikely(folio->mapping != mapping)) {
> -continue_unlock:
> +                if (!should_writeback_folio(mapping, wbc, folio)) {
>                          folio_unlock(folio);
>                          continue;
>                  }
>
> -                if (!folio_test_dirty(folio)) {
> -                        /* someone wrote it for us */
> -                        goto continue_unlock;
> -                }
> -
> -                if (folio_test_writeback(folio)) {
> -                        if (wbc->sync_mode != WB_SYNC_NONE)
> -                                folio_wait_writeback(folio);
> -                        else
> -                                goto continue_unlock;
> -                }
> -
> -                BUG_ON(folio_test_writeback(folio));
> -                if (!folio_clear_dirty_for_io(folio))
> -                        goto continue_unlock;
> -
>                  trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
> +
>                  error = writepage(folio, wbc, data);
>                  nr = folio_nr_pages(folio);
>                  if (unlikely(error)) {
> --
> 2.39.2
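For anyone following the refactor outside the kernel tree, below is a small, self-contained userspace C model of the skip/wait/write decision that the new should_writeback_folio() helper centralizes. Everything in it (the model_folio struct, model_should_writeback(), the sync-mode enum reusing the kernel's WB_SYNC_NONE/WB_SYNC_ALL names) is invented for illustration; it sketches the control flow only and is not kernel code.

```c
/*
 * Hypothetical userspace model of the checks done by the new
 * should_writeback_folio() helper.  Types and names are invented for
 * illustration; only the check ordering mirrors the patch:
 * truncation, dirtiness, then writeback-in-flight.
 */
#include <stdbool.h>
#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

struct model_folio {
        const void *mapping;     /* address_space the folio is (still) attached to */
        bool dirty;              /* has data that needs writing */
        bool under_writeback;    /* writeback already in flight */
};

static bool model_should_writeback(const void *mapping, enum sync_mode mode,
                                   struct model_folio *folio)
{
        /* Truncated or invalidated: the folio left this mapping, skip it. */
        if (folio->mapping != mapping)
                return false;

        /* Somebody already wrote it for us. */
        if (!folio->dirty)
                return false;

        /*
         * Writeback in flight: a WB_SYNC_NONE caller skips the folio,
         * while a data-integrity caller waits for it to finish (the wait
         * is modeled here by simply clearing the flag).
         */
        if (folio->under_writeback) {
                if (mode == WB_SYNC_NONE)
                        return false;
                folio->under_writeback = false;  /* stands in for folio_wait_writeback() */
        }

        /* The real helper also clears the dirty bit for I/O at this point. */
        folio->dirty = false;
        return true;
}

int main(void)
{
        const void *mapping = "an-address-space";
        struct model_folio busy = { mapping, true, true };

        /* Background writeback skips a folio that is already under writeback. */
        printf("WB_SYNC_NONE: %d\n", model_should_writeback(mapping, WB_SYNC_NONE, &busy));

        /* An integrity sync waits for it and then writes it. */
        busy.dirty = true;
        busy.under_writeback = true;
        printf("WB_SYNC_ALL:  %d\n", model_should_writeback(mapping, WB_SYNC_ALL, &busy));
        return 0;
}
```

Compiled and run, the model prints 0 then 1: the same asymmetry between background and data-integrity writeback that the open-coded loop had, now contained in a single predicate so the caller in write_cache_pages() reduces to one if plus folio_unlock()/continue.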