@@ -483,12 +483,12 @@ const struct address_space_operations def_blk_aops = {
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
- return iomap_read_folio(folio, &blkdev_iomap_ops);
+ return iomap_read_folio(folio, &blkdev_iomap_ops, NULL);
}
static void blkdev_readahead(struct readahead_control *rac)
{
- iomap_readahead(rac, &blkdev_iomap_ops);
+ iomap_readahead(rac, &blkdev_iomap_ops, NULL);
}
static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
@@ -347,12 +347,12 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
*/
static int erofs_read_folio(struct file *file, struct folio *folio)
{
- return iomap_read_folio(folio, &erofs_iomap_ops);
+ return iomap_read_folio(folio, &erofs_iomap_ops, NULL);
}
static void erofs_readahead(struct readahead_control *rac)
{
- return iomap_readahead(rac, &erofs_iomap_ops);
+ return iomap_readahead(rac, &erofs_iomap_ops, NULL);
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
@@ -451,7 +451,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
if (!gfs2_is_jdata(ip) ||
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
- error = iomap_read_folio(folio, &gfs2_iomap_ops);
+ error = iomap_read_folio(folio, &gfs2_iomap_ops, NULL);
} else if (gfs2_is_stuffed(ip)) {
error = stuffed_read_folio(ip, folio);
} else {
@@ -526,7 +526,7 @@ static void gfs2_readahead(struct readahead_control *rac)
else if (gfs2_is_jdata(ip))
mpage_readahead(rac, gfs2_block_map);
else
- iomap_readahead(rac, &gfs2_iomap_ops);
+ iomap_readahead(rac, &gfs2_iomap_ops, NULL);
}
/**
@@ -341,6 +341,7 @@ struct iomap_readpage_ctx {
bool cur_folio_in_bio;
struct bio *bio;
struct readahead_control *rac;
+ const struct iomap_read_folio_ops *ops;
};
/**
@@ -424,8 +425,12 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
gfp_t orig_gfp = gfp;
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
- if (ctx->bio)
- submit_bio(ctx->bio);
+ if (ctx->bio) {
+ if (ctx->ops && ctx->ops->submit_io)
+ ctx->ops->submit_io(iter->inode, ctx->bio);
+ else
+ submit_bio(ctx->bio);
+ }
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
@@ -475,7 +480,8 @@ static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
return done;
}
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *read_folio_ops)
{
struct iomap_iter iter = {
.inode = folio->mapping->host,
@@ -484,6 +490,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
};
struct iomap_readpage_ctx ctx = {
.cur_folio = folio,
+ .ops = read_folio_ops,
};
int ret;
@@ -493,7 +500,10 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
iter.processed = iomap_read_folio_iter(&iter, &ctx);
if (ctx.bio) {
- submit_bio(ctx.bio);
+ if (ctx.ops && ctx.ops->submit_io)
+ ctx.ops->submit_io(iter.inode, ctx.bio);
+ else
+ submit_bio(ctx.bio);
WARN_ON_ONCE(!ctx.cur_folio_in_bio);
} else {
WARN_ON_ONCE(ctx.cur_folio_in_bio);
@@ -538,6 +548,7 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
* iomap_readahead - Attempt to read pages from a file.
* @rac: Describes the pages to be read.
* @ops: The operations vector for the filesystem.
+ * @read_folio_ops: Optional hooks allowing custom bio submission; may be NULL
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
@@ -549,7 +560,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
-void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
+void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *read_folio_ops)
{
struct iomap_iter iter = {
.inode = rac->mapping->host,
@@ -558,6 +570,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
};
struct iomap_readpage_ctx ctx = {
.rac = rac,
+ .ops = read_folio_ops,
};
trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
@@ -565,8 +578,12 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
while (iomap_iter(&iter, ops) > 0)
iter.processed = iomap_readahead_iter(&iter, &ctx);
- if (ctx.bio)
- submit_bio(ctx.bio);
+ if (ctx.bio) {
+ if (ctx.ops && ctx.ops->submit_io)
+ ctx.ops->submit_io(iter.inode, ctx.bio);
+ else
+ submit_bio(ctx.bio);
+ }
if (ctx.cur_folio) {
if (!ctx.cur_folio_in_bio)
folio_unlock(ctx.cur_folio);
@@ -517,14 +517,14 @@ xfs_vm_read_folio(
struct file *unused,
struct folio *folio)
{
- return iomap_read_folio(folio, &xfs_read_iomap_ops);
+ return iomap_read_folio(folio, &xfs_read_iomap_ops, NULL);
}
STATIC void
xfs_vm_readahead(
struct readahead_control *rac)
{
- iomap_readahead(rac, &xfs_read_iomap_ops);
+ iomap_readahead(rac, &xfs_read_iomap_ops, NULL);
}
static int
@@ -112,12 +112,12 @@ static const struct iomap_ops zonefs_write_iomap_ops = {
static int zonefs_read_folio(struct file *unused, struct folio *folio)
{
- return iomap_read_folio(folio, &zonefs_read_iomap_ops);
+ return iomap_read_folio(folio, &zonefs_read_iomap_ops, NULL);
}
static void zonefs_readahead(struct readahead_control *rac)
{
- iomap_readahead(rac, &zonefs_read_iomap_ops);
+ iomap_readahead(rac, &zonefs_read_iomap_ops, NULL);
}
/*
@@ -256,14 +256,24 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
return &i->iomap;
}
+struct iomap_read_folio_ops {
+ /*
+ * Optional, allows the filesystem to perform custom bio submission,
+ * e.g. for checksum calculation or multi-device bio splitting.
+ */
+ void (*submit_io)(struct inode *inode, struct bio *bio);
+};
+
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
int (*punch)(struct inode *inode, loff_t pos, loff_t length));
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
-void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *);
+void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
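As a usage sketch (not part of this patch): a filesystem that wants to intercept read bios, for example to attach expected checksums before submission, would pass a non-NULL iomap_read_folio_ops instead of NULL. The identifiers myfs_read_iomap_ops, myfs_submit_read_bio and friends below are hypothetical placeholders, assuming only the hook signature introduced above.

/* Hypothetical example, not part of this series. */
static void myfs_submit_read_bio(struct inode *inode, struct bio *bio)
{
	/*
	 * Filesystem-specific work before issue, e.g. look up the expected
	 * csums for this range or split the bio per device, then submit.
	 */
	submit_bio(bio);
}

static const struct iomap_read_folio_ops myfs_read_folio_ops = {
	.submit_io	= myfs_submit_read_bio,
};

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &myfs_read_iomap_ops,
				&myfs_read_folio_ops);
}

static void myfs_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &myfs_read_iomap_ops, &myfs_read_folio_ops);
}

Callers that need no special handling, as in the conversions above, simply pass NULL and iomap falls back to plain submit_bio().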