--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -356,10 +356,12 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
if (!page || !PageUptodate(page)) {
- if (page)
+ if (page) {
put_page(page);
- else if (num_ra_pages > 1)
- page_cache_readahead_unbounded(&rac, num_ra_pages, 0);
+ } else if (num_ra_pages > 1) {
+ rac._nr_pages = num_ra_pages;
+ page_cache_readahead_unbounded(&rac, 0);
+ }
page = read_mapping_page(inode->i_mapping, index, NULL);
}
return page;
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -235,10 +235,12 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
if (!page || !PageUptodate(page)) {
- if (page)
+ if (page) {
put_page(page);
- else if (num_ra_pages > 1)
- page_cache_readahead_unbounded(&rac, num_ra_pages, 0);
+ } else if (num_ra_pages > 1) {
+ rac._nr_pages = num_ra_pages;
+ page_cache_readahead_unbounded(&rac, 0);
+ }
page = read_mapping_page(inode->i_mapping, index, NULL);
}
return page;
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -772,8 +772,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
void page_cache_sync_readahead(struct readahead_control *, struct file_ra_state *);
void page_cache_async_readahead(struct readahead_control *, struct file_ra_state *,
struct page *);
-void page_cache_readahead_unbounded(struct readahead_control *,
- unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_readahead_unbounded(struct readahead_control *, unsigned long);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -50,8 +50,7 @@ void unmap_page_range(struct mmu_gather *tlb,
struct zap_details *details);
void force_page_cache_readahead(struct readahead_control *);
-void __do_page_cache_readahead(struct readahead_control *,
- unsigned long nr_to_read, unsigned long lookahead_size);
+void __do_page_cache_readahead(struct readahead_control *, unsigned long);
/*
* Submit IO for the read-ahead request in file_ra_state.
@@ -60,7 +59,8 @@ static inline void ra_submit(struct file_ra_state *ra,
struct address_space *mapping, struct file *file)
{
DEFINE_READAHEAD(rac, file, mapping, ra->start);
- __do_page_cache_readahead(&rac, ra->size, ra->async_size);
+ rac._nr_pages = ra->size;
+ __do_page_cache_readahead(&rac, ra->async_size);
}
/**
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -172,10 +172,11 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
* May sleep, but will not reenter filesystem to reclaim memory.
*/
void page_cache_readahead_unbounded(struct readahead_control *rac,
- unsigned long nr_to_read, unsigned long lookahead_size)
+ unsigned long lookahead_size)
{
struct address_space *mapping = rac->mapping;
unsigned long index = readahead_index(rac);
+ unsigned long nr_to_read = readahead_count(rac);
LIST_HEAD(page_pool);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
unsigned long i;
@@ -195,6 +196,7 @@ void page_cache_readahead_unbounded(struct readahead_control *rac,
/*
* Preallocate as many pages as we will need.
*/
+ rac->_nr_pages = 0;
for (i = 0; i < nr_to_read; i++) {
struct page *page = xa_load(&mapping->i_pages, index + i);
@@ -247,7 +249,7 @@ EXPORT_SYMBOL_GPL(page_cache_readahead_unbounded);
* We really don't want to intermingle reads and writes like that.
*/
void __do_page_cache_readahead(struct readahead_control *rac,
- unsigned long nr_to_read, unsigned long lookahead_size)
+ unsigned long lookahead_size)
{
struct inode *inode = rac->mapping->host;
unsigned long index = readahead_index(rac);
@@ -261,10 +263,10 @@ void __do_page_cache_readahead(struct readahead_control *rac,
if (index > end_index)
return;
/* Don't read past the page containing the last byte of the file */
- if (nr_to_read > end_index - index)
- nr_to_read = end_index - index + 1;
+ if (readahead_count(rac) > end_index - index)
+ rac->_nr_pages = end_index - index + 1;
- page_cache_readahead_unbounded(rac, nr_to_read, lookahead_size);
+ page_cache_readahead_unbounded(rac, lookahead_size);
}
/*
@@ -297,8 +299,7 @@ void force_page_cache_readahead(struct readahead_control *rac)
rac->_index = index;
rac->_nr_pages = this_chunk;
- // Do I need to modify rac->_batch_count?
- __do_page_cache_readahead(rac, this_chunk, 0);
+ __do_page_cache_readahead(rac, 0);
index += this_chunk;
nr_to_read -= this_chunk;
@@ -601,7 +602,7 @@ static void ondemand_readahead(struct readahead_control *rac,
* standalone, small random read
* Read as is, and do not pollute the readahead state.
*/
- __do_page_cache_readahead(rac, req_size, 0);
+ __do_page_cache_readahead(rac, 0);
return;
initial_readahead:
@@ -630,7 +631,8 @@ static void ondemand_readahead(struct readahead_control *rac,
rac->_index = ra->start;
if (page && page_cache_readahead_order(rac, ra, thp_order(page)))
return;
- __do_page_cache_readahead(rac, ra->size, ra->async_size);
+ rac->_nr_pages = ra->size;
+ __do_page_cache_readahead(rac, ra->async_size);
}
/**
Make __do_page_cache_readahead() use rac->_nr_pages rather than passing in
an nr_to_read argument.

Signed-off-by: David Howells <dhowells@redhat.com>
---
 fs/ext4/verity.c        |  8 +++++---
 fs/f2fs/verity.c        |  8 +++++---
 include/linux/pagemap.h |  3 +--
 mm/internal.h           |  6 +++---
 mm/readahead.c          | 20 +++++++++++---------
 5 files changed, 25 insertions(+), 20 deletions(-)
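
For illustration, the resulting caller-side convention can be sketched as
below. This is a hypothetical caller (example_submit_readahead() is not part
of the patch) that simply mirrors the ra_submit() hunk above and assumes the
DEFINE_READAHEAD() macro and struct readahead_control fields shown in this
series: the request length is stashed in rac->_nr_pages up front, and only
the lookahead size is still passed as an argument.

/*
 * Hypothetical caller, for illustration only: after this patch the number
 * of pages to read is carried in rac._nr_pages, so the readahead_control
 * fully describes the request (mapping, start index, length) and nr_to_read
 * no longer needs to be threaded through as a separate argument.
 */
static void example_submit_readahead(struct file *file,
				     struct address_space *mapping,
				     struct file_ra_state *ra)
{
	DEFINE_READAHEAD(rac, file, mapping, ra->start);

	rac._nr_pages = ra->size;		/* was the nr_to_read argument */
	__do_page_cache_readahead(&rac, ra->async_size);
}

The same convention applies to page_cache_readahead_unbounded(), as in the
ext4/f2fs fs-verity hunks: load rac._nr_pages with the number of pages wanted,
then call it with only the lookahead count.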