All but one caller already has the iomap_page, and we can avoid getting
it again.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -134,11 +134,9 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
*lenp = plen;
}
-static void
-iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_iop_set_range_uptodate(struct page *page,
+ struct iomap_page *iop, unsigned off, unsigned len)
{
- struct folio *folio = page_folio(page);
- struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = page->mapping->host;
unsigned first = off >> inode->i_blkbits;
unsigned last = (off + len - 1) >> inode->i_blkbits;
@@ -151,14 +149,14 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_set_range_uptodate(struct page *page,
+ struct iomap_page *iop, unsigned off, unsigned len)
{
if (PageError(page))
return;
- if (page_has_private(page))
- iomap_iop_set_range_uptodate(page, off, len);
+ if (iop)
+ iomap_iop_set_range_uptodate(page, iop, off, len);
else
SetPageUptodate(page);
}
@@ -174,7 +172,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
ClearPageUptodate(page);
SetPageError(page);
} else {
- iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+ iomap_set_range_uptodate(page, iop, bvec->bv_offset,
+ bvec->bv_len);
}
if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
@@ -254,7 +253,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (iomap_block_needs_zeroing(inode, iomap, pos)) {
zero_user(page, poff, plen);
- iomap_set_range_uptodate(page, poff, plen);
+ iomap_set_range_uptodate(page, iop, poff, plen);
goto done;
}
@@ -583,7 +582,7 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
if (status)
return status;
}
- iomap_set_range_uptodate(page, poff, plen);
+ iomap_set_range_uptodate(page, iop, poff, plen);
} while ((block_start += plen) < block_end);
return 0;
@@ -645,6 +644,8 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
size_t copied, struct page *page)
{
+ struct folio *folio = page_folio(page);
+ struct iomap_page *iop = to_iomap_page(folio);
flush_dcache_page(page);
/*
@@ -660,7 +661,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
*/
if (unlikely(copied < len && !PageUptodate(page)))
return 0;
- iomap_set_range_uptodate(page, offset_in_page(pos), len);
+ iomap_set_range_uptodate(page, iop, offset_in_page(pos), len);
__set_page_dirty_nobuffers(page);
return copied;
}
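
As an illustration of the calling convention the patch adopts, here is a
small userspace mock (all types and names below are stand-ins of my own,
not the kernel's): the caller resolves the per-page private state once and
threads it through each call, and a NULL pointer means there is no
sub-page state, so the page is treated as a single unit.

#include <stdio.h>

/*
 * Userspace mock of the convention this patch adopts.  All names and
 * types are stand-ins, not the kernel's: the point is only that the
 * caller resolves the per-page private state once and passes it down,
 * instead of every callee re-deriving it from the page.
 */
struct iomap_page { unsigned ranges_marked; };
struct page { struct iomap_page *private; };

/* A NULL iop means no sub-page state: treat the page as one unit. */
static void set_range_uptodate(struct page *page, struct iomap_page *iop,
		unsigned off, unsigned len)
{
	if (iop) {
		iop->ranges_marked++;	/* stand-in for the bitmap update */
		printf("marked bytes %u..%u\n", off, off + len - 1);
	} else {
		printf("whole page uptodate\n");
	}
	(void)page;
}

int main(void)
{
	struct iomap_page iop = { 0 };
	struct page page = { &iop };
	struct iomap_page *state = page.private;	/* resolve once... */

	set_range_uptodate(&page, state, 0, 1024);	/* ...then reuse */
	set_range_uptodate(&page, state, 1024, 1024);	/* ...and reuse */
	printf("%u ranges marked\n", iop.ranges_marked);
	return 0;
}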
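
The sub-page bookkeeping that makes the iop worth passing around is the
first/last block arithmetic in iomap_iop_set_range_uptodate().  A
standalone sketch of that computation, assuming a 4096-byte page on a
filesystem with 1024-byte blocks (i_blkbits == 10; these values are my
example, not taken from the patch):

#include <stdio.h>

/*
 * Mirrors the arithmetic in iomap_iop_set_range_uptodate() for an
 * assumed 4096-byte page with 1024-byte blocks.  Illustration only;
 * not kernel code.
 */
int main(void)
{
	unsigned i_blkbits = 10;		/* log2(block size) */
	unsigned off = 1024, len = 2048;	/* bytes 1024..3071 */
	unsigned first = off >> i_blkbits;		/* block 1 */
	unsigned last = (off + len - 1) >> i_blkbits;	/* block 2 */

	/*
	 * The kernel then does bitmap_set(iop->uptodate, first,
	 * last - first + 1) under iop->uptodate_lock.
	 */
	printf("marking blocks %u..%u (%u of 4)\n",
			first, last, last - first + 1);
	return 0;
}

Running it prints "marking blocks 1..2 (2 of 4)"; only once all four
blocks have been marked does the page itself become PageUptodate.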