diff --git a/fs/buffer.c b/fs/buffer.c
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -631,7 +631,7 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
unsigned long flags;
spin_lock_irqsave(&mapping->tree_lock, flags);
- if (page->mapping) { /* Race with truncate? */
+ if (page_mapping(page)) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1237,7 +1237,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
}
lock_page(page);
- if (page->mapping != mapping) {
+ if (page_mapping(page) != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
@@ -2975,7 +2975,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
}
lock_page(page);
- if (page->mapping != mapping) {
+ if (page_mapping(page) != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -399,7 +399,7 @@ static int __filemap_fdatawait_range(struct address_space *mapping,
struct page *page = pvec.pages[i];
/* until radix tree lookup accepts end_index */
- if (page->index > end)
+ if (page_to_pgoff(page) > end)
continue;
page = compound_head(page);
@@ -1364,7 +1364,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
}
/* Has the page been truncated? */
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(page_mapping(page) != mapping)) {
unlock_page(page);
put_page(page);
goto repeat;
@@ -1641,7 +1641,8 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
* otherwise we can get both false positives and false
* negatives, which is just confusing to the caller.
*/
- if (page->mapping == NULL || page_to_pgoff(page) != index) {
+ if (page_mapping(page) == NULL ||
+ page_to_pgoff(page) != index) {
put_page(page);
break;
}
@@ -1929,7 +1930,7 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
if (!trylock_page(page))
goto page_not_up_to_date;
/* Did it get truncated before we got the lock? */
- if (!page->mapping)
+ if (!page_mapping(page))
goto page_not_up_to_date_locked;
if (!mapping->a_ops->is_partially_uptodate(page,
offset, iter->count))
@@ -2009,7 +2010,7 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
page_not_up_to_date_locked:
/* Did it get truncated before we got the lock? */
- if (!page->mapping) {
+ if (!page_mapping(page)) {
unlock_page(page);
put_page(page);
continue;
@@ -2045,7 +2046,7 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
if (unlikely(error))
goto readpage_error;
if (!PageUptodate(page)) {
- if (page->mapping == NULL) {
+ if (page_mapping(page) == NULL) {
/*
* invalidate_mapping_pages got it
*/
@@ -2344,12 +2345,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Did it get truncated? */
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(page_mapping(page) != mapping)) {
unlock_page(page);
put_page(page);
goto retry_find;
}
- VM_BUG_ON_PAGE(page->index != offset, page);
+ VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
/*
* We have a locked page in the page cache, now we need to check
@@ -2525,7 +2526,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
lock_page(page);
- if (page->mapping != inode->i_mapping) {
+ if (page_mapping(page) != inode->i_mapping) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
@@ -2674,7 +2675,7 @@ static struct page *do_read_cache_page(struct address_space *mapping,
lock_page(page);
/* Case c or d, restart the operation */
- if (!page->mapping) {
+ if (!page_mapping(page)) {
unlock_page(page);
put_page(page);
goto repeat;
@@ -3130,12 +3131,13 @@ EXPORT_SYMBOL(generic_file_write_iter);
*/
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
- struct address_space * const mapping = page->mapping;
+ struct address_space * const mapping = page_mapping(page);
BUG_ON(!PageLocked(page));
if (PageWriteback(page))
return 0;
+ page = compound_head(page);
if (mapping && mapping->a_ops->releasepage)
return mapping->a_ops->releasepage(page, gfp_mask);
return try_to_free_buffers(page);
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2049,7 +2049,7 @@ static int do_page_mkwrite(struct vm_fault *vmf)
return ret;
if (unlikely(!(ret & VM_FAULT_LOCKED))) {
lock_page(page);
- if (!page->mapping) {
+ if (!page_mapping(page)) {
unlock_page(page);
return 0; /* retry */
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2869,7 +2869,7 @@ EXPORT_SYMBOL(mapping_tagged);
*/
void wait_for_stable_page(struct page *page)
{
- if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
+ if (bdi_cap_stable_pages_required(inode_to_bdi(page_mapping(page)->host)))
wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
diff --git a/mm/truncate.c b/mm/truncate.c
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -620,6 +620,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
unsigned long flags;
+ page = compound_head(page);
if (page->mapping != mapping)
return 0;
@@ -648,7 +649,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
{
if (!PageDirty(page))
return 0;
- if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+ if (page_mapping(page) != mapping || mapping->a_ops->launder_page == NULL)
return 0;
return mapping->a_ops->launder_page(page);
}
@@ -698,7 +699,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
lock_page(page);
WARN_ON(page_to_pgoff(page) != index);
- if (page->mapping != mapping) {
+ if (page_mapping(page) != mapping) {
unlock_page(page);
continue;
}
With huge pages in the page cache we see tail pages in more code paths. This
patch replaces direct access to struct page fields with macros that can handle
tail pages properly.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 fs/buffer.c         |  2 +-
 fs/ext4/inode.c     |  4 ++--
 mm/filemap.c        | 24 +++++++++++++-----------
 mm/memory.c         |  2 +-
 mm/page-writeback.c |  2 +-
 mm/truncate.c       |  5 +++--
 6 files changed, 21 insertions(+), 18 deletions(-)
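As a note below the cut line: the reason the helpers are safe on tail pages is
that they resolve the compound head before reading struct page fields, whereas
a raw page->mapping or page->index read on a tail page sees unreliable values.
The sketch below only illustrates that idea; the function names are
placeholders, and the real page_mapping()/page_to_pgoff() additionally handle
slab, anonymous and swap-cache pages as well as hugetlbfs index scaling.

/*
 * Simplified sketch of the idea behind page_mapping()/page_to_pgoff()
 * (illustrative only -- not the kernel's actual implementation).
 * Tail pages of a compound page do not carry a valid ->mapping/->index,
 * so both helpers look at the head page first.
 */
static inline struct address_space *sketch_page_mapping(struct page *page)
{
	/* compound_head() is a no-op for non-compound pages */
	struct page *head = compound_head(page);

	return head->mapping;
}

static inline pgoff_t sketch_page_to_pgoff(struct page *page)
{
	struct page *head = compound_head(page);

	/* file offset of this subpage = head's index + offset within the compound page */
	return head->index + (page - head);
}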