@@ -196,9 +196,9 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	 * Preallocate as many pages as we will need.
 	 */
 	for (i = 0; i < nr_to_read; i++) {
-		struct page *page = xa_load(&mapping->i_pages, index + i);
+		struct folio *folio = xa_load(&mapping->i_pages, index + i);
 
-		if (page && !xa_is_value(page)) {
+		if (folio && !xa_is_value(folio)) {
 			/*
 			 * Page already present? Kick off the current batch
 			 * of contiguous pages before continuing with the
@@ -212,21 +212,21 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 			continue;
 		}
 
-		page = __page_cache_alloc(gfp_mask);
-		if (!page)
+		folio = filemap_alloc_folio(gfp_mask, 0);
+		if (!folio)
 			break;
 		if (mapping->a_ops->readpages) {
-			page->index = index + i;
-			list_add(&page->lru, &page_pool);
-		} else if (add_to_page_cache_lru(page, mapping, index + i,
+			folio->index = index + i;
+			list_add(&folio->lru, &page_pool);
+		} else if (filemap_add_folio(mapping, folio, index + i,
 					gfp_mask) < 0) {
-			put_page(page);
+			folio_put(folio);
 			read_pages(ractl, &page_pool, true);
 			i = ractl->_index + ractl->_nr_pages - index - 1;
 			continue;
 		}
 		if (i == nr_to_read - lookahead_size)
-			SetPageReadahead(page);
+			folio_set_readahead(folio);
 		ractl->_nr_pages++;
 	}
 
This saves 99 bytes of kernel text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/readahead.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
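
For readers newer to the folio API, the conversion above is a set of
one-for-one replacements: __page_cache_alloc() becomes
filemap_alloc_folio() with an explicit allocation order,
add_to_page_cache_lru() becomes filemap_add_folio(), put_page() becomes
folio_put(), and SetPageReadahead() becomes folio_set_readahead().
Below is a minimal sketch of the allocate-and-insert pattern in
isolation. The helper name ra_alloc_folio() and its error-code plumbing
are illustrative assumptions, not part of this patch, and the snippet
builds only in kernel context:

#include <linux/pagemap.h>

/*
 * Illustrative sketch only -- ra_alloc_folio() is a hypothetical
 * helper for this example, not something the patch adds.
 */
static int ra_alloc_folio(struct address_space *mapping, pgoff_t index,
			  gfp_t gfp_mask)
{
	/* Order 0 means a single page, matching __page_cache_alloc(). */
	struct folio *folio = filemap_alloc_folio(gfp_mask, 0);
	int err;

	if (!folio)
		return -ENOMEM;

	/*
	 * Replaces add_to_page_cache_lru(); on success the page cache
	 * holds its own reference to the folio.
	 */
	err = filemap_add_folio(mapping, folio, index, gfp_mask);
	if (err) {
		folio_put(folio);	/* replaces put_page() */
		return err;
	}
	return 0;
}

The patch passes order 0, so a single page is still allocated and
behaviour is unchanged; keeping the order parameter explicit leaves
room for allocating multi-page folios at this call site later without
another signature change.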