Message ID | 20240213093713.1753368-7-kernel@pankajraghav.com |
---|---|
State | New |
Series | enable bs > ps in XFS |
```
On Tue, Feb 13, 2024 at 10:37:05AM +0100, Pankaj Raghav (Samsung) wrote:
> From: Hannes Reinecke <hare@suse.de>
>
> Rework the loop in page_cache_ra_unbounded() to advance with
> the number of pages in a folio instead of just one page at a time.
>
> Signed-off-by: Hannes Reinecke <hare@suse.de>
> Co-developed-by: Pankaj Raghav <p.raghav@samsung.com>
> Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>

Acked-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  mm/readahead.c | 13 +++++++------
>  1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 5e1ec7705c78..13b62cbd3b79 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -213,7 +213,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>  	struct address_space *mapping = ractl->mapping;
>  	unsigned long index = readahead_index(ractl);
>  	gfp_t gfp_mask = readahead_gfp_mask(mapping);
> -	unsigned long i;
> +	unsigned long i = 0;
>
>  	/*
>  	 * Partway through the readahead operation, we will have added
> @@ -231,7 +231,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>  	/*
>  	 * Preallocate as many pages as we will need.
>  	 */
> -	for (i = 0; i < nr_to_read; i++) {
> +	while (i < nr_to_read) {
>  		struct folio *folio = xa_load(&mapping->i_pages, index + i);
>
>  		if (folio && !xa_is_value(folio)) {
> @@ -244,8 +244,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>  			 * not worth getting one just for that.
>  			 */
>  			read_pages(ractl);
> -			ractl->_index++;
> -			i = ractl->_index + ractl->_nr_pages - index - 1;
> +			ractl->_index += folio_nr_pages(folio);
> +			i = ractl->_index + ractl->_nr_pages - index;
>  			continue;
>  		}
>
> @@ -257,13 +257,14 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
>  			folio_put(folio);
>  			read_pages(ractl);
>  			ractl->_index++;
> -			i = ractl->_index + ractl->_nr_pages - index - 1;
> +			i = ractl->_index + ractl->_nr_pages - index;
>  			continue;
>  		}
>  		if (i == nr_to_read - lookahead_size)
>  			folio_set_readahead(folio);
>  		ractl->_workingset |= folio_test_workingset(folio);
> -		ractl->_nr_pages++;
> +		ractl->_nr_pages += folio_nr_pages(folio);
> +		i += folio_nr_pages(folio);
>  	}
>
>  	/*
> --
> 2.43.0
```
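For readers who want to see the loop shape in isolation, here is a minimal userspace sketch (not kernel code; `folio_nr_pages_at()` and the fixed 4-page folio size are invented for illustration) of the idea in the commit message: the scan index advances by each folio's page count rather than by one page per iteration.

```c
#include <stdio.h>

/* Hypothetical stand-in for folio_nr_pages(): page count of the folio
 * covering page `index`. Here every folio is pretended to span 4 pages. */
static unsigned long folio_nr_pages_at(unsigned long index)
{
	(void)index;
	return 4;
}

int main(void)
{
	unsigned long nr_to_read = 16;
	unsigned long i = 0;

	/* Old loop shape: for (i = 0; i < nr_to_read; i++) -- one page per
	 * iteration, 16 iterations total. The reworked shape below takes
	 * one iteration per folio instead. */
	while (i < nr_to_read) {
		unsigned long nr = folio_nr_pages_at(i);

		printf("folio at page %lu covers %lu pages\n", i, nr);
		i += nr;	/* advance by the folio's page count */
	}
	return 0;
}
```

With 16 pages to read and 4-page folios, the loop body runs four times instead of sixteen, which is the point of the rework once the page cache can hold folios larger than a page.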
```diff
diff --git a/mm/readahead.c b/mm/readahead.c
index 5e1ec7705c78..13b62cbd3b79 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -213,7 +213,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	struct address_space *mapping = ractl->mapping;
 	unsigned long index = readahead_index(ractl);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
-	unsigned long i;
+	unsigned long i = 0;
 
 	/*
 	 * Partway through the readahead operation, we will have added
@@ -231,7 +231,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	/*
 	 * Preallocate as many pages as we will need.
 	 */
-	for (i = 0; i < nr_to_read; i++) {
+	while (i < nr_to_read) {
 		struct folio *folio = xa_load(&mapping->i_pages, index + i);
 
 		if (folio && !xa_is_value(folio)) {
@@ -244,8 +244,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 			 * not worth getting one just for that.
 			 */
 			read_pages(ractl);
-			ractl->_index++;
-			i = ractl->_index + ractl->_nr_pages - index - 1;
+			ractl->_index += folio_nr_pages(folio);
+			i = ractl->_index + ractl->_nr_pages - index;
 			continue;
 		}
 
@@ -257,13 +257,14 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 			folio_put(folio);
 			read_pages(ractl);
 			ractl->_index++;
-			i = ractl->_index + ractl->_nr_pages - index - 1;
+			i = ractl->_index + ractl->_nr_pages - index;
 			continue;
 		}
 		if (i == nr_to_read - lookahead_size)
 			folio_set_readahead(folio);
 		ractl->_workingset |= folio_test_workingset(folio);
-		ractl->_nr_pages++;
+		ractl->_nr_pages += folio_nr_pages(folio);
+		i += folio_nr_pages(folio);
 	}
 
 	/*
```
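A note on the dropped `- 1`, with illustrative numbers not taken from the patch itself: in the old `for` loop, `continue` still executed the `i++` step, so the resumption index had to be computed one short to compensate. The `while` form has no per-iteration increment, so `i` is assigned the next index to probe directly. For example, if readahead started at `index == 100` and `read_pages()` has consumed the whole batch (leaving `ractl->_nr_pages` at 0 and `ractl->_index` at 108), then skipping a cached 4-page folio bumps `ractl->_index` to 112 and `i = 112 + 0 - 100 = 12`, so the next `xa_load()` probes page `index + i == 112`, the first page past the cached folio.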