@@ -966,15 +966,30 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, loff_t length
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
- bool unstuff, alloc_required;
+ bool unstuff = false, alloc_required;
int ret;
ret = gfs2_write_lock(inode);
if (ret)
return ret;
- unstuff = gfs2_is_stuffed(ip) &&
- pos + length > gfs2_max_stuffed_size(ip);
+ if (gfs2_is_stuffed(ip)) {
+ unstuff = pos + length > gfs2_max_stuffed_size(ip);
+
+ if (!unstuff) {
+ iomap->page = grab_cache_page_write_begin(
+ inode->i_mapping, 0, flags);
+ ret = -ENOMEM;
+ if (!iomap->page)
+ goto out;
+
+ if (!PageUptodate(iomap->page)) {
+ ret = stuffed_readpage(ip, iomap->page);
+ if (ret)
+ goto out;
+ }
+ }
+ }
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
if (ret)
@@ -1044,6 +1059,12 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, loff_t length
gfs2_quota_unlock(ip);
out_release:
release_metapath(&mp);
+out:
+ if (iomap->page) {
+ unlock_page(iomap->page);
+ put_page(iomap->page);
+ iomap->page = NULL;
+ }
gfs2_write_unlock(inode);
return ret;
}
@@ -1073,54 +1094,10 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
return ret;
}
-static int
-gfs2_iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, struct iomap *iomap)
-{
- struct gfs2_inode *ip = GFS2_I(inode);
- struct page *page;
- int ret;
-
- if (gfs2_is_stuffed(ip)) {
- BUG_ON(pos + len > gfs2_max_stuffed_size(ip));
-
- page = grab_cache_page_write_begin(inode->i_mapping, 0, flags);
- if (!page)
- return -ENOMEM;
-
- if (!PageUptodate(page)) {
- ret = stuffed_readpage(ip, page);
- if (ret) {
- unlock_page(page);
- put_page(page);
- return ret;
- }
- }
- *pagep = page;
- return 0;
- }
-
- return iomap_write_begin(inode, pos, len, flags, pagep, iomap);
-}
-
static int gfs2_iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct page *page)
{
struct gfs2_inode *ip = GFS2_I(inode);
- struct buffer_head *dibh;
- int ret;
-
- if (gfs2_is_stuffed(ip)) {
- ret = gfs2_meta_inode_buffer(ip, &dibh);
- if (ret) {
- unlock_page(page);
- put_page(page);
- return ret;
- }
- ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
- brelse(dibh);
- return ret;
- }
if (gfs2_is_jdata(ip))
gfs2_page_add_databufs(ip, page, offset_in_page(pos), len);
@@ -1139,6 +1116,23 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
return 0;
+ if (iomap->page) {
+ struct buffer_head *dibh;
+
+ ret = gfs2_meta_inode_buffer(ip, &dibh);
+ if (ret) {
+ unlock_page(iomap->page);
+ put_page(iomap->page);
+ iomap->page = NULL;
+ goto out;
+ }
+ ret = gfs2_stuffed_write_end(inode, dibh, pos, written,
+ iomap->page);
+ iomap->page = NULL;
+ brelse(dibh);
+ goto out;
+ }
+
gfs2_ordered_add_inode(ip);
if (tr->tr_num_buf_new)
@@ -1153,6 +1147,7 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
}
}
+out:
if (inode == sdp->sd_rindex) {
adjust_fs_space(inode);
sdp->sd_rindex_uptodate = 0;
@@ -1180,7 +1175,7 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
const struct iomap_ops gfs2_iomap_ops = {
.iomap_begin = gfs2_iomap_begin,
- .write_begin = gfs2_iomap_write_begin,
+ .write_begin = iomap_write_begin,
.write_end = gfs2_iomap_write_end,
.iomap_end = gfs2_iomap_end,
};
diff --git a/fs/iomap.c b/fs/iomap.c
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -196,10 +196,13 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
break;
}
- status = args->ops->write_begin(inode, pos, bytes, flags, &page,
- iomap);
- if (unlikely(status))
- break;
+ page = iomap->page;
+ if (!iomap->page) {
+ status = args->ops->write_begin(inode, pos, bytes, flags, &page,
+ iomap);
+ if (unlikely(status))
+ break;
+ }
if (mapping_writably_mapped(inode->i_mapping))
flush_dcache_page(page);
@@ -208,10 +211,12 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
flush_dcache_page(page);
- status = args->ops->write_end(inode, pos, bytes, copied, page);
- if (unlikely(status < 0))
- break;
- copied = status;
+ if (!iomap->page) {
+ status = args->ops->write_end(inode, pos, bytes, copied, page);
+ if (unlikely(status < 0))
+ break;
+ copied = status;
+ }
cond_resched();
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -47,6 +47,7 @@ struct iomap {
u64 length; /* length of mapping, bytes */
u16 type; /* type of mapping */
u16 flags; /* flags for mapping */
+ struct page *page; /* locked page from ->iomap_begin, if any */
struct block_device *bdev; /* block device for I/O */
struct dax_device *dax_dev; /* dax_dev for dax operations */
};
Proof of concept: move the gfs2 stuffed file handling from the
iomap_write_{begin,end} handlers to the iomap_{begin,end} handlers.
With this change, the page being written to is locked before the page
being read from is faulted in, so when both refer to the same page, we
end up with a deadlock, as demonstrated by xfstest generic/248.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/gfs2/bmap.c        | 91 ++++++++++++++++++++-----------------------
 fs/iomap.c            | 21 ++++++----
 include/linux/iomap.h |  1 +
 3 files changed, 57 insertions(+), 56 deletions(-)
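
For readers who want to see the deadlock pattern concretely, here is a
minimal, hypothetical userspace sketch of the write-from-mmap case that
generic/248 exercises (illustrative only, not the xfstests source; the
file name and sizes are arbitrary). The destination of the second
pwrite() and its source buffer are the same page of the same small
(stuffed) file, so locking the destination page in ->iomap_begin before
faulting in the source leaves the fault waiting on a lock the writer
already holds:

/*
 * Illustrative reproducer sketch (not the actual xfstests source for
 * generic/248): pwrite() to a file using a source buffer that is an
 * mmap()ed view of the very same page of that file.  If the write path
 * locks the destination page before faulting in the source buffer, the
 * page fault on the source waits forever for a page lock the writer
 * already holds.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	const char *path = argc > 1 ? argv[1] : "stuffed-file";
	char *addr;
	int fd;

	fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Keep the file tiny so gfs2 keeps it "stuffed" (inline data). */
	if (pwrite(fd, "abcd", 4, 0) != 4) {
		perror("pwrite");
		return 1;
	}

	addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Source (addr) and destination (offset 8) live on the same page
	 * cache page.  With the page locked in ->iomap_begin, faulting in
	 * the source deadlocks; on an unaffected kernel this completes.
	 */
	if (pwrite(fd, addr, 4, 8) != 4)
		perror("pwrite from mmap");

	munmap(addr, 4096);
	close(fd);
	return 0;
}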