
[RFC,7/7] fs: xfs: Enable buffered atomic writes

Message ID 20240422143923.3927601-8-john.g.garry@oracle.com (mailing list archive)
State New, archived
Series buffered block atomic writes

Commit Message

John Garry April 22, 2024, 2:39 p.m. UTC
Enable support for buffered atomic writes, in addition to the already
supported direct IO atomic writes.
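
For context, a minimal userspace sketch of issuing such a write once this
patch is applied; it assumes uapi headers which define RWF_ATOMIC from the
parent atomic writes series, and the file path and extent size are arbitrary
examples. Note that the open() no longer needs O_DIRECT:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/fs.h>	/* RWF_ATOMIC, on kernels carrying the series */

int main(void)
{
	size_t extsz_bytes = 16384;	/* assumed forcealign extent size */
	struct iovec iov;
	void *buf = calloc(1, extsz_bytes);
	int fd;

	if (!buf)
		return 1;

	/* buffered open: with this patch O_DIRECT is no longer required */
	fd = open("/mnt/xfs/atomicfile", O_WRONLY);
	if (fd < 0)
		return 1;

	iov.iov_base = buf;
	iov.iov_len = extsz_bytes;

	/* a single extent-sized write at an extent-aligned offset */
	if (pwritev2(fd, &iov, 1, 0, RWF_ATOMIC) < 0)
		return 1;

	close(fd);
	free(buf);
	return 0;
}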

For an inode with FS_XFLAG_ATOMICWRITES set, the folio mapping min and max
order are both set to the order corresponding to the extent alignment size.
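
As a worked example of that mapping set-up, assume a 4 KiB filesystem block
size and a 16 KiB extent alignment; the numbers and the helper name below
are purely illustrative:

#include <strings.h>

/*
 * Illustrative only: mirrors the ffs()-based calculation in the patch.
 * With a 4 KiB block size and a 16 KiB extent size, the extent covers
 * 16384 / 4096 = 4 filesystem blocks, and ffs(4) - 1 = 2, so both the
 * min and max mapping folio order become 2, i.e. 16 KiB folios which
 * match the atomic write unit exactly.
 */
static unsigned int extsize_to_folio_order(unsigned int extsize_fsblocks)
{
	return ffs(extsize_fsblocks) - 1;	/* log2() of a power-of-2 value */
}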

Atomic write support depends on forcealign, which requires extent sizes to
be a power-of-2 and naturally aligned; this matches folios nicely.
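
A rough sketch of the constraint this places on the buffered write path (the
patch itself relies on generic_atomic_write_valid_size() from earlier in the
series); the helper below is hypothetical and only illustrates the case where
the minimum and maximum atomic write sizes both equal the extent size:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper: with min and max atomic write sizes both set to
 * the extent size, a buffered atomic write must cover exactly one extent
 * and start on an extent-size-aligned offset.
 */
static bool buffered_atomic_write_ok(uint64_t pos, uint64_t len,
				     uint64_t extsz_bytes)
{
	if (len != extsz_bytes)		/* exactly one extent */
		return false;
	if (pos % extsz_bytes)		/* naturally aligned start */
		return false;
	return true;
}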

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 fs/xfs/libxfs/xfs_inode_buf.c |  8 ++++++++
 fs/xfs/xfs_file.c             | 12 ++++++++++--
 fs/xfs/xfs_ioctl.c            |  3 +++
 3 files changed, 21 insertions(+), 2 deletions(-)

Patch

diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index abaef1137b97..38e058756b1e 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -181,6 +181,7 @@  xfs_inode_from_disk(
 	struct inode		*inode = VFS_I(ip);
 	int			error;
 	xfs_failaddr_t		fa;
+	struct xfs_mount	*mp = ip->i_mount;
 
 	ASSERT(ip->i_cowfp == NULL);
 
@@ -261,6 +262,13 @@  xfs_inode_from_disk(
 	}
 	if (xfs_is_reflink_inode(ip))
 		xfs_ifork_init_cow(ip);
+
+	if (xfs_inode_atomicwrites(ip)) {
+		unsigned int folio_order = ffs(XFS_B_TO_FSB(mp, ip->i_extsize)) - 1;
+
+		mapping_set_folio_orders(VFS_I(ip)->i_mapping, folio_order, folio_order);
+	}
+
 	return 0;
 
 out_destroy_data_fork:
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 2fbefd60d753..d35869b5e4ce 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -782,6 +782,16 @@  xfs_file_buffered_write(
 	ssize_t			ret;
 	bool			cleared_space = false;
 	unsigned int		iolock;
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (iocb->ki_flags & IOCB_ATOMIC) {
+		unsigned int extsz_bytes = XFS_FSB_TO_B(mp, ip->i_extsize);
+
+		if (!generic_atomic_write_valid_size(iocb->ki_pos, from,
+			extsz_bytes, extsz_bytes)) {
+			return -EINVAL;
+		}
+	}
 
 write_retry:
 	iolock = XFS_IOLOCK_EXCL;
@@ -1241,8 +1251,6 @@  static bool xfs_file_open_can_atomicwrite(
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
 
-	if (!(file->f_flags & O_DIRECT))
-		return false;
 
 	if (!xfs_inode_atomicwrites(ip))
 		return false;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d115f2601921..d6b146c999f6 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1169,10 +1169,13 @@  xfs_ioctl_setattr_xflags(
 	}
 
 	if (atomic_writes) {
+		unsigned int folio_order = ffs(XFS_B_TO_FSB(mp, fa->fsx_extsize)) - 1;
+
 		if (!xfs_has_atomicwrites(mp))
 			return -EINVAL;
 		if (!(fa->fsx_xflags & FS_XFLAG_FORCEALIGN))
 			return -EINVAL;
+		mapping_set_folio_orders(VFS_I(ip)->i_mapping, folio_order, folio_order);
 	}
 
 	ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
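
For completeness, the xfs_ioctl.c hunk above is reached when userspace enables
the inode flags via FS_IOC_FSSETXATTR. A hedged sketch of that sequence
follows, assuming the FS_XFLAG_FORCEALIGN and FS_XFLAG_ATOMICWRITES
definitions from this series (not mainline uapi) and an arbitrary 16 KiB
extent size:

#include <linux/fs.h>
#include <sys/ioctl.h>

/*
 * Illustrative only: mark an empty file for forcealign + atomic writes.
 * FS_XFLAG_FORCEALIGN and FS_XFLAG_ATOMICWRITES come from this series.
 */
static int enable_atomic_writes(int fd)
{
	struct fsxattr fsx;

	if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx))
		return -1;
	fsx.fsx_xflags |= FS_XFLAG_EXTSIZE | FS_XFLAG_FORCEALIGN |
			  FS_XFLAG_ATOMICWRITES;
	fsx.fsx_extsize = 16384;	/* extent alignment = atomic write unit */
	return ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
}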