@@ -43,7 +43,7 @@ static inline void set_page_count(struct page *page, int v)
 
 extern int __do_page_cache_readahead(struct address_space *mapping,
 		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
-		unsigned long lookahead_size);
+		unsigned long lookahead_size, int report_present);
 
 /*
  * Submit IO for the read-ahead request in file_ra_state.
@@ -52,7 +52,7 @@ static inline unsigned long ra_submit(struct file_ra_state *ra,
 		struct address_space *mapping, struct file *filp)
 {
 	return __do_page_cache_readahead(mapping, filp,
-					ra->start, ra->size, ra->async_size);
+					ra->start, ra->size, ra->async_size, 0);
 }
 
 /*
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -151,12 +151,13 @@ out:
  */
 int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read,
-			unsigned long lookahead_size)
+			unsigned long lookahead_size, int report_present)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
 	unsigned long end_index;	/* The last page we want to read */
 	LIST_HEAD(page_pool);
+	int present = 0;
 	int page_idx;
 	int ret = 0;
 	loff_t isize = i_size_read(inode);
@@ -178,8 +179,10 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		rcu_read_lock();
 		page = radix_tree_lookup(&mapping->page_tree, page_offset);
 		rcu_read_unlock();
-		if (page && !radix_tree_exceptional_entry(page))
+		if (page && !radix_tree_exceptional_entry(page)) {
+			present++;
 			continue;
+		}
 
 		page = page_cache_alloc_readahead(mapping);
 		if (!page)
@@ -199,6 +202,8 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	if (ret)
 		read_pages(mapping, filp, &page_pool, ret);
 	BUG_ON(!list_empty(&page_pool));
+	if (report_present)
+		ret += present;
 out:
 	return ret;
 }
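
[Not part of the patch.] With this hunk the return value becomes: pages newly submitted for IO, plus, only when report_present is set, the pages that were already resident and skipped in the lookup loop above. Every call site touched by this patch passes 0, so behaviour is unchanged until a caller opts in. A minimal sketch of an opted-in caller, assuming kernel context; the helper name readahead_range_covered is hypothetical:

static int readahead_range_covered(struct address_space *mapping,
				   struct file *filp, pgoff_t offset,
				   unsigned long nr)
{
	/* report_present=1: already-cached pages count toward the result */
	int covered = __do_page_cache_readahead(mapping, filp, offset,
						nr, 0, 1);

	if (covered < 0)
		return covered;	/* propagate errors unchanged */

	/* covered = pages queued for IO + pages found present */
	return covered == (int)nr;
}

Note that covered can legitimately fall short of nr even without an error, since the loop stops at end_index or on allocation failure.
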
@@ -222,7 +227,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
 		err = __do_page_cache_readahead(mapping, filp,
-						offset, this_chunk, 0);
+						offset, this_chunk, 0, 0);
 		if (err < 0)
 			return err;
 
@@ -441,7 +446,7 @@ ondemand_readahead(struct address_space *mapping,
 	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
-	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0, 0);
 
 initial_readahead:
 	ra->start = offset;
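
[Not part of the patch.] Taken together: the signature grows one flag, all existing call sites pass 0, and only the counting inside __do_page_cache_readahead changes. The arithmetic of the new contract, as a standalone userspace model (hypothetical names; the real function can also stop early at end_index or when page allocation fails):

#include <assert.h>

/* Toy model: of `nr` requested pages, `cached` are already in the
 * page cache and the remainder get allocated and queued for IO. */
static int do_readahead_model(int nr, int cached, int report_present)
{
	int submitted = nr - cached;	/* old return value */
	int ret = submitted;

	if (report_present)
		ret += cached;		/* new: fold in present pages */
	return ret;
}

int main(void)
{
	/* an 8-page request with 3 pages already cached */
	assert(do_readahead_model(8, 3, 0) == 5);	/* existing callers */
	assert(do_readahead_model(8, 3, 1) == 8);	/* opted-in caller */
	return 0;
}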