@@ -474,7 +474,7 @@ static int __gfs2_readpage(void *file, struct page *page)
if (!gfs2_is_jdata(ip) ||
(i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
- error = iomap_readpage(page, &gfs2_iomap_ops);
+ error = iomap_readpage(page, &gfs2_iomap_ops, NULL);
} else if (gfs2_is_stuffed(ip)) {
error = stuffed_readpage(ip, page);
unlock_page(page);
@@ -563,7 +563,7 @@ static void gfs2_readahead(struct readahead_control *rac)
else if (gfs2_is_jdata(ip))
mpage_readahead(rac, gfs2_block_map);
else
- iomap_readahead(rac, &gfs2_iomap_ops);
+ iomap_readahead(rac, &gfs2_iomap_ops, NULL);
}
/**
@@ -201,10 +201,11 @@ iomap_read_end_io(struct bio *bio)
}
struct iomap_readpage_ctx {
- struct page *cur_page;
- bool cur_page_in_bio;
- struct bio *bio;
- struct readahead_control *rac;
+ struct page *cur_page;
+ bool cur_page_in_bio;
+ struct bio *bio;
+ struct readahead_control *rac;
+ const struct iomap_readpage_ops *ops;
};
static void
@@ -282,19 +283,31 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
gfp_t orig_gfp = gfp;
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
- if (ctx->bio)
- submit_bio(ctx->bio);
+ if (ctx->bio) {
+ if (ctx->ops && ctx->ops->submit_io)
+ ctx->ops->submit_io(inode, ctx->bio);
+ else
+ submit_bio(ctx->bio);
+ }
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
+ if (ctx->ops && ctx->ops->alloc_bio)
+ ctx->bio = ctx->ops->alloc_bio(gfp,
+ bio_max_segs(nr_vecs));
+ else
+ ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
* what do_mpage_readpage does.
*/
- if (!ctx->bio)
- ctx->bio = bio_alloc(orig_gfp, 1);
+ if (!ctx->bio) {
+ if (ctx->ops && ctx->ops->alloc_bio)
+ ctx->bio = ctx->ops->alloc_bio(orig_gfp, 1);
+ else
+ ctx->bio = bio_alloc(orig_gfp, 1);
+ }
ctx->bio->bi_opf = REQ_OP_READ;
if (ctx->rac)
ctx->bio->bi_opf |= REQ_RAHEAD;
@@ -315,9 +328,13 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
int
-iomap_readpage(struct page *page, const struct iomap_ops *ops)
+iomap_readpage(struct page *page, const struct iomap_ops *ops,
+ const struct iomap_readpage_ops *readpage_ops)
{
- struct iomap_readpage_ctx ctx = { .cur_page = page };
+ struct iomap_readpage_ctx ctx = {
+ .cur_page = page,
+ .ops = readpage_ops,
+ };
struct inode *inode = page->mapping->host;
unsigned poff;
loff_t ret;
@@ -336,7 +353,10 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
}
if (ctx.bio) {
- submit_bio(ctx.bio);
+ if (ctx.ops && ctx.ops->submit_io)
+ ctx.ops->submit_io(inode, ctx.bio);
+ else
+ submit_bio(ctx.bio);
WARN_ON_ONCE(!ctx.cur_page_in_bio);
} else {
WARN_ON_ONCE(ctx.cur_page_in_bio);
@@ -392,13 +412,15 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
-void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
+void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops,
+ const struct iomap_readpage_ops *readpage_ops)
{
struct inode *inode = rac->mapping->host;
loff_t pos = readahead_pos(rac);
size_t length = readahead_length(rac);
struct iomap_readpage_ctx ctx = {
.rac = rac,
+ .ops = readpage_ops,
};
trace_iomap_readahead(inode, readahead_count(rac));
@@ -414,8 +436,12 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
length -= ret;
}
- if (ctx.bio)
- submit_bio(ctx.bio);
+ if (ctx.bio) {
+ if (ctx.ops && ctx.ops->submit_io)
+ ctx.ops->submit_io(inode, ctx.bio);
+ else
+ submit_bio(ctx.bio);
+ }
if (ctx.cur_page) {
if (!ctx.cur_page_in_bio)
unlock_page(ctx.cur_page);
@@ -535,14 +535,14 @@ xfs_vm_readpage(
struct file *unused,
struct page *page)
{
- return iomap_readpage(page, &xfs_read_iomap_ops);
+ return iomap_readpage(page, &xfs_read_iomap_ops, NULL);
}
STATIC void
xfs_vm_readahead(
struct readahead_control *rac)
{
- iomap_readahead(rac, &xfs_read_iomap_ops);
+ iomap_readahead(rac, &xfs_read_iomap_ops, NULL);
}
static int
@@ -115,12 +115,12 @@ static const struct iomap_ops zonefs_iomap_ops = {
static int zonefs_readpage(struct file *unused, struct page *page)
{
- return iomap_readpage(page, &zonefs_iomap_ops);
+ return iomap_readpage(page, &zonefs_iomap_ops, NULL);
}
static void zonefs_readahead(struct readahead_control *rac)
{
- iomap_readahead(rac, &zonefs_iomap_ops);
+ iomap_readahead(rac, &zonefs_iomap_ops, NULL);
}
/*
@@ -157,8 +157,6 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
-int iomap_readpage(struct page *page, const struct iomap_ops *ops);
-void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
int iomap_set_page_dirty(struct page *page);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count);
@@ -188,6 +186,23 @@ loff_t iomap_seek_data(struct inode *inode, loff_t offset,
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops);
+struct iomap_readpage_ops {
+ /* Optional: allocate a custom struct bio; bio_alloc() is used if unset */
+ struct bio *(*alloc_bio)(gfp_t gfp_mask, unsigned short nr_iovecs);
+
+ /*
+ * Optional, allows the filesystem to perform custom bio submission,
+ * such as csum calculation or a multi-device bio split; submit_bio()
+ * is used if unset.
+ */
+ void (*submit_io)(struct inode *inode, struct bio *bio);
+};
+
+int iomap_readpage(struct page *page, const struct iomap_ops *ops,
+ const struct iomap_readpage_ops *readpage_ops);
+void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops,
+ const struct iomap_readpage_ops *readpage_ops);
+
/*
* Structure for writeback I/O completions.
*/
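
For illustration only (not part of this series): a filesystem that wants both hooks could wire them up roughly as in the sketch below. All examplefs_* names are hypothetical, examplefs_iomap_ops stands in for the filesystem's existing iomap_ops, examplefs_bioset for a private bio_set initialised elsewhere, and the callback bodies only hint at the kind of customisation the two hooks are meant for (this follows the bio_alloc_bioset() and address_space_operations prototypes current at the time of this series).

/* Hypothetical example, not part of the patch. */
extern struct bio_set examplefs_bioset;		/* assumed initialised at module init */
extern const struct iomap_ops examplefs_iomap_ops;	/* the fs's existing iomap_ops */

/* Allocate read bios from a filesystem-private bioset instead of fs_bio_set. */
static struct bio *examplefs_alloc_bio(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &examplefs_bioset);
}

/* Hook the submission, e.g. to set up csum verification or split the bio. */
static void examplefs_submit_io(struct inode *inode, struct bio *bio)
{
	/* per-inode setup (checksums, device mapping, ...) would go here */
	submit_bio(bio);
}

static const struct iomap_readpage_ops examplefs_readpage_ops = {
	.alloc_bio	= examplefs_alloc_bio,
	.submit_io	= examplefs_submit_io,
};

static int examplefs_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &examplefs_iomap_ops, &examplefs_readpage_ops);
}

static void examplefs_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &examplefs_iomap_ops, &examplefs_readpage_ops);
}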