@@ -22,6 +22,7 @@
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
+#include <linux/sched/signal.h>
#include "gfs2.h"
#include "incore.h"
@@ -847,6 +848,110 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
return copied;
}
+/**
+ * gfs2_stuffed_write - Write to a stuffed (inline) file
+ * @iocb: The I/O control block
+ * @from: The data to be written
+ *
+ * Copies data into the inline data area of the inode via the page cache.
+ * The caller must hold the inode glock and the file must remain stuffed
+ * for the entire write (checked via the BUG_ONs below).
+ *
+ * Returns: the number of bytes written, or a negative error code if
+ * nothing was written.
+ */
+ssize_t gfs2_stuffed_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ loff_t pos = iocb->ki_pos;
+ struct page *page = NULL;
+ ssize_t written = 0, ret;
+
+ BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
+ BUG_ON(!gfs2_is_stuffed(ip));
+
+ ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
+ if (ret)
+ return ret;
+
+ do {
+ struct buffer_head *dibh;
+ unsigned long offset;
+ unsigned long bytes;
+ size_t copied;
+
+ offset = pos & (PAGE_SIZE - 1);
+ bytes = min_t(unsigned long, PAGE_SIZE - offset,
+ iov_iter_count(from));
+again:
+ /*
+ * Bring in the user page that we will copy from _first_.
+ * Otherwise there's a nasty deadlock on copying from the
+ * same page as we're writing to, without it being marked
+ * up-to-date.
+ *
+ * Not only is this an optimisation, but it is also required
+ * to check that the address is actually valid, when atomic
+ * usercopies are used, below.
+ */
+ if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
+ if (!page) {
+ /* Must go through out: to end the transaction and to
+ return any bytes written in earlier iterations. */
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!PageUptodate(page)) {
+ ret = stuffed_readpage(ip, page);
+ if (ret)
+ goto out;
+ }
+
+ if (mapping_writably_mapped(inode->i_mapping))
+ flush_dcache_page(page);
+
+ copied = iov_iter_copy_from_user_atomic(page, from, pos, bytes);
+
+ flush_dcache_page(page);
+
+ ret = gfs2_meta_inode_buffer(ip, &dibh);
+ if (ret)
+ goto out;
+ ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
+ brelse(dibh);
+ if (ret)
+ goto out;
+
+ unlock_page(page);
+ put_page(page);
+ page = NULL;
+
+ iov_iter_advance(from, copied);
+ if (unlikely(copied == 0)) {
+ bytes = iov_iter_single_seg_count(from);
+ goto again;
+ }
+ pos += copied;
+ written += copied;
+ } while (iov_iter_count(from));
+
+out:
+ if (page) {
+ unlock_page(page);
+ put_page(page);
+ }
+ gfs2_trans_end(sdp);
+ return written ? written : ret;
+}
+
/**
* gfs2_write_end
* @file: The file to write to
@@ -11,6 +11,7 @@
#include "incore.h"
+extern ssize_t gfs2_stuffed_write(struct kiocb *iocb, struct iov_iter *from);
extern void adjust_fs_space(struct inode *inode);
extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
unsigned int from, unsigned int len);
@@ -827,7 +827,7 @@ static int gfs2_write_lock(struct inode *inode)
return error;
}
-static void gfs2_write_unlock(struct inode *inode)
+void gfs2_write_unlock(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -856,14 +856,14 @@ static int gfs2_iomap_write_begin(struct inode *inode, loff_t pos, loff_t length
if (gfs2_is_stuffed(ip)) {
if (pos + length <= gfs2_max_stuffed_size(ip)) {
- ret = -ENOTBLK;
- goto out_unlock;
+ /* Keep the inode locked! */
+ return -ENOTBLK;
}
}
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
if (ret)
- goto out_release;
+ goto out_unlock;
alloc_required = iomap->type != IOMAP_MAPPED;
if (alloc_required || gfs2_is_jdata(ip))
@@ -873,7 +873,7 @@ static int gfs2_iomap_write_begin(struct inode *inode, loff_t pos, loff_t length
struct gfs2_alloc_parms ap = { .target = data_blocks + ind_blocks };
ret = gfs2_quota_lock_check(ip, &ap);
if (ret)
- goto out_release;
+ goto out_unlock;
ret = gfs2_inplace_reserve(ip, &ap);
if (ret)
@@ -924,9 +924,8 @@ static int gfs2_iomap_write_begin(struct inode *inode, loff_t pos, loff_t length
out_qunlock:
gfs2_quota_unlock(ip);
}
-out_release:
- release_metapath(&mp);
out_unlock:
+ release_metapath(&mp);
gfs2_write_unlock(inode);
return ret;
}
@@ -62,5 +62,6 @@ extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
extern void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
extern int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
+extern void gfs2_write_unlock(struct inode *inode);
#endif /* __BMAP_DOT_H__ */
@@ -31,6 +31,7 @@
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
+#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
@@ -710,6 +711,10 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb, struct iov_iter *fro
current->backing_dev_info = inode_to_bdi(inode);
ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ if (ret == -ENOTBLK) {
+ ret = gfs2_stuffed_write(iocb, from);
+ gfs2_write_unlock(inode);
+ }
current->backing_dev_info = NULL;
@@ -761,10 +766,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_flags & IOCB_DIRECT)
return generic_file_write_iter(iocb, from);
- ret = gfs2_file_buffered_write(iocb, from);
- if (ret == -ENOTBLK)
- ret = generic_file_write_iter(iocb, from);
- return ret;
+ return gfs2_file_buffered_write(iocb, from);
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
Instead of falling back to generic_file_write_iter when writing to a
stuffed file that stays stuffed, implement that case separately. We
want to get rid of the remaining users of gfs2_write_begin +
gfs2_write_end so that those functions can eventually be removed, and
generic_file_write_iter uses that interface.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/gfs2/aops.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/gfs2/aops.h |  1 +
 fs/gfs2/bmap.c | 13 ++++----
 fs/gfs2/bmap.h |  1 +
 fs/gfs2/file.c | 10 ++++---
 5 files changed, 108 insertions(+), 11 deletions(-)