@@ -2564,38 +2564,37 @@ EXPORT_SYMBOL(block_commit_write);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
			 get_block_t get_block)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vma->vm_file);
 	unsigned long end;
 	loff_t size;
 	int ret;
 
-	lock_page(page);
+	folio_lock(folio);
 	size = i_size_read(inode);
-	if ((page->mapping != inode->i_mapping) ||
-	    (page_offset(page) > size)) {
+	if ((folio->mapping != inode->i_mapping) ||
+	    (folio_pos(folio) >= size)) {
 		/* We overload EFAULT to mean page got truncated */
 		ret = -EFAULT;
 		goto out_unlock;
 	}
 
-	/* page is wholly or partially inside EOF */
-	if (((page->index + 1) << PAGE_SHIFT) > size)
-		end = size & ~PAGE_MASK;
-	else
-		end = PAGE_SIZE;
+	end = folio_size(folio);
+	/* folio is wholly or partially inside EOF */
+	if (folio_pos(folio) + end > size)
+		end = size - folio_pos(folio);
 
-	ret = __block_write_begin(page, 0, end, get_block);
+	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
 	if (!ret)
-		ret = block_commit_write(page, 0, end);
+		ret = block_commit_write(&folio->page, 0, end);
 
 	if (unlikely(ret < 0))
 		goto out_unlock;
-	set_page_dirty(page);
-	wait_for_stable_page(page);
+	folio_mark_dirty(folio);
+	folio_wait_stable(folio);
 	return 0;
 out_unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 	return ret;
 }
 EXPORT_SYMBOL(block_page_mkwrite);
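
The behavioural change buried in the hunk above is the truncation check:
page_offset(page) > size becomes folio_pos(folio) >= size, which is the
pre-existing bug referred to in the commit message below. A minimal
standalone sketch (userspace C with hypothetical offsets, not part of the
patch) of the case the old strict comparison lets through:

/* Illustration only: why "pos > size" misses a truncation to an exact
 * page boundary while "pos >= size" catches it. */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pos  = 1 * EXAMPLE_PAGE_SIZE; /* faulting page starts at byte 4096 */
	unsigned long size = 1 * EXAMPLE_PAGE_SIZE; /* file just truncated to exactly 4096 */

	/* Old check: the page's first byte is not strictly beyond EOF, so
	 * the fault proceeds and dirties a page that lies entirely outside
	 * the file. */
	printf("pos > size:  %s\n", pos > size ? "rejected" : "proceeds (old behaviour)");

	/* New check: a folio whose first byte is at or past EOF is treated
	 * as truncated and the fault returns -EFAULT instead. */
	printf("pos >= size: %s\n", pos >= size ? "rejected (new behaviour)" : "proceeds");
	return 0;
}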
If any page in a folio is dirtied, dirty the entire folio.  Removes a
number of hidden calls to compound_head() and references to page->mapping
and page->index.  Fixes a pre-existing bug where we could mark a folio as
dirty if the file is truncated to a multiple of the page size just as we
take the page fault.  I don't believe this bug has any bad effect, it's
just inefficient.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/buffer.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)
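
The EOF handling also changes shape: the old code could prepare at most
PAGE_SIZE bytes and derived the partial length with size & ~PAGE_MASK,
whereas the folio version starts from folio_size() and clamps to i_size.
A minimal standalone sketch of the new arithmetic (userspace C with
made-up byte offsets, not kernel code):

/* Illustration only: clamp the range to EOF for a folio that straddles
 * the end of the file. */
#include <stdio.h>

int main(void)
{
	unsigned long pos    = 16384;	/* folio_pos(): folio starts at byte 16384 */
	unsigned long fsize  = 16384;	/* folio_size(): a 16KiB (4-page) folio */
	unsigned long i_size = 20000;	/* EOF falls inside this folio */
	unsigned long end;

	end = fsize;
	/* folio is wholly or partially inside EOF */
	if (pos + end > i_size)
		end = i_size - pos;	/* only prepare/dirty up to EOF */

	/* Prints 3616: the bytes of the folio that precede EOF. */
	printf("end = %lu bytes of the folio are prepared and dirtied\n", end);
	return 0;
}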