@@ -59,18 +59,18 @@ iomap_page_create(struct inode *inode, struct folio *folio)
return iop;
}

-static void
-iomap_page_release(struct page *page)
+static void iomap_page_release(struct folio *folio)
{
- struct iomap_page *iop = detach_page_private(page);
- unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
+ struct iomap_page *iop = folio_detach_private(folio);
+ struct inode *inode = folio->mapping->host;
+ unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

if (!iop)
return;
WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
- PageUptodate(page));
+ folio_test_uptodate(folio));
kfree(iop);
}
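
Reassembled from the hunk above for readability (the inline comments are added here and are not part of the patch), the helper now operates purely on a folio:

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	/* No reads or writes may still be pending against this private data. */
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	/* A full per-block uptodate bitmap must agree with the folio uptodate flag. */
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}
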
@@ -451,6 +451,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
+ struct folio *folio = page_folio(page);
+
trace_iomap_releasepage(page->mapping->host, page_offset(page),
PAGE_SIZE);
@@ -461,7 +463,7 @@ iomap_releasepage(struct page *page, gfp_t gfp_mask)
*/
if (PageDirty(page) || PageWriteback(page))
return 0;
- iomap_page_release(page);
+ iomap_page_release(folio);
return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
@@ -469,6 +471,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
+ struct folio *folio = page_folio(page);
+
trace_iomap_invalidatepage(page->mapping->host, offset, len);

/*
@@ -478,7 +482,7 @@ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
if (offset == 0 && len == PAGE_SIZE) {
WARN_ON_ONCE(PageWriteback(page));
cancel_dirty_page(page);
- iomap_page_release(page);
+ iomap_page_release(folio);
}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);
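
Both entry points above still take a struct page (the ->releasepage and ->invalidatepage aops are not converted at this point), so each one bridges to the folio API with page_folio() before calling the folio-based helper. A minimal sketch of that pattern, using a hypothetical caller name:

/*
 * Hypothetical caller, not part of the patch: illustrates the page_folio()
 * bridge used by iomap_releasepage() and iomap_invalidatepage() above.
 */
static void example_drop_iomap_private(struct page *page)
{
	struct folio *folio = page_folio(page);	/* look up the owning folio */

	if (PageDirty(page) || PageWriteback(page))
		return;		/* private data may still be needed for I/O */
	iomap_page_release(folio);
}
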