@@ -729,24 +729,14 @@ unsigned long shmem_swap_usage(struct vm_area_struct *vma)
void shmem_unlock_mapping(struct address_space *mapping)
{
struct pagevec pvec;
- pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index = 0;
pagevec_init(&pvec, 0);
/*
* Minor point, but we might as well stop if someone else SHM_LOCKs it.
*/
- while (!mapping_unevictable(mapping)) {
- /*
- * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
- * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
- */
- pvec.nr = find_get_entries(mapping, index,
- PAGEVEC_SIZE, pvec.pages, indices);
- if (!pvec.nr)
- break;
- index = indices[pvec.nr - 1] + 1;
- pagevec_remove_exceptionals(&pvec);
+ while (!mapping_unevictable(mapping) &&
+ pagevec_lookup(&pvec, mapping, &index)) {
check_move_unevictable_pages(pvec.pages, pvec.nr);
pagevec_release(&pvec);
cond_resched();
The comment about find_get_pages() returning 0 when it hits a row of swap entries seems to be stale. Use pagevec_lookup() in shmem_unlock_mapping() to simplify the code. CC: Hugh Dickins <hughd@google.com> Signed-off-by: Jan Kara <jack@suse.cz> --- mm/shmem.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-)