@@ -606,12 +606,23 @@ xfs_get_atomic_write_attr(
unsigned int *unit_min,
unsigned int *unit_max)
{
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ struct xfs_mount *mp = ip->i_mount;
+
if (!xfs_inode_can_atomicwrite(ip)) {
*unit_min = *unit_max = 0;
return;
}

- *unit_min = *unit_max = ip->i_mount->m_sb.sb_blocksize;
+ *unit_min = mp->m_sb.sb_blocksize;
+
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ /* For now, set limit at 1x block */
+ *unit_max = mp->m_sb.sb_blocksize;
+ } else {
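+ /*
+ * awu_max is tracked in fsblocks; convert to bytes and clamp to the
+ * atomic write limit of the underlying block device.
+ */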
+ *unit_max = min_t(unsigned int, XFS_FSB_TO_B(mp, mp->awu_max),
+ target->bt_bdev_awu_max);
+ }
}

static void
@@ -651,6 +651,32 @@ xfs_agbtree_compute_maxlevels(
levels = max(levels, mp->m_rmap_maxlevels);
mp->m_agbtree_maxlevels = max(levels, mp->m_refc_maxlevels);
}

+static inline void
+xfs_compute_awu_max(
+ struct xfs_mount *mp)
+{
+ xfs_agblock_t agsize = mp->m_sb.sb_agblocks;
+ xfs_agblock_t awu_max;
+
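+ /* Multi-block atomic writes require reflink; without it, cap at a single block. */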
+ if (!xfs_has_reflink(mp)) {
+ mp->awu_max = 1;
+ return;
+ }
+
+ /*
+ * Find the highest power of two that divides agsize evenly and whose
+ * size in bytes still fits in an unsigned int.
+ */
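+ /* e.g. 4k blocks, agblocks = 16384: the doubling stops at 16384 fsblocks (64MiB) */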
+ awu_max = 1;
+ while (1) {
+ if (agsize % (awu_max * 2))
+ break;
+ if (XFS_FSB_TO_B(mp, awu_max * 2) > UINT_MAX)
+ break;
+ awu_max *= 2;
+ }
+ mp->awu_max = awu_max;
+}
/* Compute maximum possible height for realtime btree types for this fs. */
static inline void
@@ -736,6 +762,8 @@ xfs_mountfs(
xfs_agbtree_compute_maxlevels(mp);
xfs_rtbtree_compute_maxlevels(mp);

+ xfs_compute_awu_max(mp);
+
/*
* Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
* is NOT aligned turn off m_dalign since allocator alignment is within
@@ -198,6 +198,7 @@ typedef struct xfs_mount {
bool m_fail_unmount;
bool m_finobt_nores; /* no per-AG finobt resv. */
bool m_update_sb; /* sb needs update in mount */
+ xfs_extlen_t awu_max; /* data device max atomic write, in fsblocks */

/*
* Bitsets of per-fs metadata that have been checked and/or are sick.