@@ -998,6 +998,7 @@ xfs_buffered_write_iomap_begin(
 	struct iomap		*iomap,
 	struct iomap		*srcmap)
 {
+	struct iomap_iter	*iter = container_of(iomap, struct iomap_iter, iomap);
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1065,12 +1066,21 @@ xfs_buffered_write_iomap_begin(
 	 */
 	if (flags & IOMAP_ZERO) {
 		xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
+		u64 end;
 
 		if (isnullstartblock(imap.br_startblock) &&
 		    offset_fsb >= eof_fsb)
 			goto convert_delay;
 		if (offset_fsb < eof_fsb && end_fsb > eof_fsb)
 			end_fsb = eof_fsb;
+		if (imap.br_state == XFS_EXT_UNWRITTEN &&
+		    offset_fsb < eof_fsb) {
+			xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
+			end = iomap_fill_dirty_folios(iter,
+					XFS_FSB_TO_B(mp, imap.br_startoff),
+					XFS_FSB_TO_B(mp, imap.br_blockcount));
+			end_fsb = min_t(xfs_fileoff_t, end_fsb, XFS_B_TO_FSB(mp, end));
+		}
 		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
 	}
 

Use the iomap folio batch mechanism to identify which folios to zero on
zero range of unwritten mappings. Trim the resulting mapping if the
batch is filled (unlikely) and set the HAS_FOLIOS flag to inform iomap
that pagecache has been checked for dirty folios.

Signed-off-by: Brian Foster <bfoster@redhat.com>
---
 fs/xfs/xfs_iomap.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
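
For context on the batch fill step, here is a minimal sketch of what a
helper like iomap_fill_dirty_folios() is expected to do at the call site
above. This is an illustration under assumptions, not this patch's
implementation: fill_dirty_folios_sketch() is a hypothetical name, and
only the contract visible in the hunk (byte offset and length in, clamp
offset out) comes from the diff. filemap_get_folios_tag() and
folio_batch_init() are existing kernel APIs.

/*
 * Hypothetical sketch only: gather the dirty folios backing a byte
 * range into a folio_batch and return the byte offset the caller may
 * zero up to. Not the actual iomap_fill_dirty_folios() body.
 */
#include <linux/pagemap.h>
#include <linux/pagevec.h>

static loff_t
fill_dirty_folios_sketch(
	struct address_space	*mapping,
	struct folio_batch	*fbatch,
	loff_t			pos,
	loff_t			len)
{
	pgoff_t			start = pos >> PAGE_SHIFT;
	pgoff_t			end = (pos + len - 1) >> PAGE_SHIFT;

	folio_batch_init(fbatch);

	/*
	 * filemap_get_folios_tag() leaves @start just past the last folio
	 * taken when the batch fills early, and past @end otherwise. The
	 * returned offset therefore only trims the caller's range in the
	 * (unlikely) batch-full case, matching the min_t() clamp on
	 * end_fsb in the hunk above.
	 */
	filemap_get_folios_tag(mapping, &start, end, PAGECACHE_TAG_DIRTY,
			fbatch);
	return (loff_t)start << PAGE_SHIFT;
}

Note that the HAS_FOLIOS flag mentioned in the log is not set anywhere
in this hunk, so presumably the real helper (or the iomap iterator that
owns the batch) records the batch presence itself; the sketch leaves
that part out.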