@@ -959,36 +959,54 @@ static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
goto out;
}
-static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
- unsigned len)
+static struct folio *
+gfs2_iomap_folio_prepare(struct inode *inode, unsigned fgp,
+ loff_t pos, unsigned len)
{
unsigned int blockmask = i_blocksize(inode) - 1;
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int blocks;
+ struct folio *folio;
+ int ret;
blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
- return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+ ret = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ folio = __filemap_get_folio(inode->i_mapping, pos >> PAGE_SHIFT, fgp,
+ mapping_gfp_mask(inode->i_mapping));
+ if (!folio)
+ gfs2_trans_end(sdp);
+
+ return folio;
}
-static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
- unsigned copied, struct page *page)
+static void
+gfs2_iomap_folio_done(struct inode *inode, struct folio *folio,
+ loff_t pos, unsigned copied)
{
struct gfs2_trans *tr = current->journal_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (page && !gfs2_is_stuffed(ip))
- gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+ folio_unlock(folio);
+
+ if (!gfs2_is_stuffed(ip))
+ gfs2_page_add_databufs(ip, &folio->page, offset_in_page(pos),
+ copied);
if (tr->tr_num_buf_new)
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
gfs2_trans_end(sdp);
+
+ folio_put(folio);
}
-static const struct iomap_page_ops gfs2_iomap_page_ops = {
- .page_prepare = gfs2_iomap_page_prepare,
- .page_done = gfs2_iomap_page_done,
+static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+ .folio_prepare = gfs2_iomap_folio_prepare,
+ .folio_done = gfs2_iomap_folio_done,
};
static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
@@ -1064,7 +1082,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
}
if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
- iomap->page_ops = &gfs2_iomap_page_ops;
+ iomap->folio_ops = &gfs2_iomap_folio_ops;
return 0;
out_trans_end:
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -587,7 +587,7 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
size_t len, struct folio **foliop)
{
- const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+ const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
struct folio *folio;
unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
@@ -606,17 +606,18 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (!mapping_large_folio_support(iter->inode->i_mapping))
len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
- if (page_ops && page_ops->page_prepare) {
- status = page_ops->page_prepare(iter->inode, pos, len);
- if (status)
- return status;
+ if (folio_ops && folio_ops->folio_prepare) {
+ folio = folio_ops->folio_prepare(iter->inode, fgp, pos, len);
+ } else {
+ folio = __filemap_get_folio(iter->inode->i_mapping,
+ pos >> PAGE_SHIFT, fgp,
+ mapping_gfp_mask(iter->inode->i_mapping));
}
-
- folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
- fgp, mapping_gfp_mask(iter->inode->i_mapping));
- if (!folio) {
- status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
- goto out_no_page;
+ if (IS_ERR_OR_NULL(folio)) {
+ status = PTR_ERR(folio);
+ if (!folio)
+ status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
+ return status;
}
if (pos + len > folio_pos(folio) + folio_size(folio))
len = folio_pos(folio) + folio_size(folio) - pos;
@@ -635,13 +636,13 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
return 0;
out_unlock:
- folio_unlock(folio);
- folio_put(folio);
+ if (folio_ops && folio_ops->folio_done) {
+ folio_ops->folio_done(iter->inode, folio, pos, 0);
+ } else {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
iomap_write_failed(iter->inode, pos, len);
-
-out_no_page:
- if (page_ops && page_ops->page_done)
- page_ops->page_done(iter->inode, pos, 0, NULL);
return status;
}
@@ -691,7 +692,7 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
- const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+ const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t old_size = iter->inode->i_size;
size_t ret;
@@ -715,10 +716,13 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
folio_may_straddle_isize(iter->inode, folio, old_size, pos);
}
- folio_unlock(folio);
- if (page_ops && page_ops->page_done)
- page_ops->page_done(iter->inode, pos, ret, &folio->page);
- folio_put(folio);
+ if (folio_ops && folio_ops->folio_done) {
+ folio_ops->folio_done(iter->inode, folio, pos, ret);
+ } else {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+
if (ret < len)
iomap_write_failed(iter->inode, pos + ret, len - ret);
return ret;
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -76,7 +76,7 @@ struct vm_fault;
*/
#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
-struct iomap_page_ops;
+struct iomap_folio_ops;
struct iomap {
u64 addr; /* disk offset of mapping, bytes */
@@ -88,7 +88,7 @@ struct iomap {
struct dax_device *dax_dev; /* dax_dev for dax operations */
void *inline_data;
void *private; /* filesystem private */
- const struct iomap_page_ops *page_ops;
+ const struct iomap_folio_ops *folio_ops;
};
static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
@@ -115,19 +115,19 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
}
/*
- * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
- * and page_done will be called for each page written to. This only applies to
- * buffered writes as unbuffered writes will not typically have pages
+ * When a filesystem sets folio_ops in an iomap mapping it returns, folio_prepare
+ * and folio_done will be called for each folio written to. This only applies to
+ * buffered writes as unbuffered writes will not typically have folios
* associated with them.
*
- * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary. In that page_done call, @page will be NULL if the
- * associated page could not be obtained.
+ * folio_prepare is responsible for looking up and locking the folio to be
+ * written to.  When folio_prepare succeeds, folio_done will always be called
+ * to unlock and put the folio and to do any other necessary cleanup work.
*/
-struct iomap_page_ops {
- int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
- void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
- struct page *page);
+struct iomap_folio_ops {
+ struct folio *(*folio_prepare)(struct inode *inode, unsigned fgp,
+ loff_t pos, unsigned len);
+ void (*folio_done)(struct inode *inode, struct folio *folio,
+ loff_t pos, unsigned copied);
};
/*
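For illustration only -- the sketch below is not part of this patch. Under the new contract, ->folio_prepare() looks up and locks the folio itself (returning NULL or an ERR_PTR on failure), and ->folio_done() is responsible for unlocking and putting it. A minimal conversion for a hypothetical filesystem with no per-folio transaction work might look roughly like this ("examplefs" and all of its identifiers are made-up names; only __filemap_get_folio(), folio_unlock(), folio_put() and struct iomap_folio_ops come from the code above):

#include <linux/pagemap.h>
#include <linux/iomap.h>

static struct folio *
examplefs_iomap_folio_prepare(struct inode *inode, unsigned fgp,
                              loff_t pos, unsigned len)
{
        /*
         * Look up the folio covering @pos; @fgp already contains FGP_LOCK,
         * so the folio comes back locked.  Returning NULL (or an ERR_PTR)
         * makes iomap_write_begin() fail without calling ->folio_done().
         */
        return __filemap_get_folio(inode->i_mapping, pos >> PAGE_SHIFT, fgp,
                                   mapping_gfp_mask(inode->i_mapping));
}

static void
examplefs_iomap_folio_done(struct inode *inode, struct folio *folio,
                           loff_t pos, unsigned copied)
{
        /*
         * Called for every folio that ->folio_prepare() handed out, with
         * @copied == 0 when the write failed.  The hook owns the final
         * unlock and reference drop.
         */
        folio_unlock(folio);
        folio_put(folio);
}

static const struct iomap_folio_ops examplefs_iomap_folio_ops = {
        .folio_prepare  = examplefs_iomap_folio_prepare,
        .folio_done     = examplefs_iomap_folio_done,
};

A filesystem opts in by pointing iomap->folio_ops at such a table from its ->iomap_begin() handler, as gfs2_iomap_begin_write() does above for stuffed and jdata inodes.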
Rename the iomap page_ops into folio_ops, and rename the operations accordingly. Move looking up the folio into ->folio_prepare(), and unlocking and putting the folio into ->folio_done(). We'll need the added flexibility in gfs2. Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com> --- fs/gfs2/bmap.c | 40 +++++++++++++++++++++++++---------- fs/iomap/buffered-io.c | 48 +++++++++++++++++++++++------------------- include/linux/iomap.h | 24 ++++++++++----------- 3 files changed, 67 insertions(+), 45 deletions(-)