@@ -1401,6 +1401,10 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 		.len		= len,
 		.flags		= IOMAP_ZERO,
 	};
+	struct address_space *mapping = inode->i_mapping;
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int off = pos & (blocksize - 1);
+	loff_t plen = min_t(loff_t, len, blocksize - off);
 	int ret;
 	bool range_dirty;
 
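As a standalone sketch (not part of the patch; the block size and zero range below are hypothetical), the sub-block arithmetic introduced by the hunk above works out as follows: "off" is the byte offset of pos within its filesystem block, and "plen" clamps the zeroing length to the remainder of that first, possibly partial, block.

/*
 * Standalone sketch of the sub-block arithmetic added above; the block
 * size and zero range are made-up example values, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long blocksize = 4096;		/* assumed fs block size */
	unsigned long long pos = 6144, len = 10000;	/* hypothetical zero range */

	/* offset of pos within its block, as in "pos & (blocksize - 1)" */
	unsigned long long off = pos & (blocksize - 1);
	/* clamp to the rest of that first block, as in the min_t() above */
	unsigned long long plen = len < blocksize - off ? len : blocksize - off;

	/* prints: off=2048 plen=2048 range=[6144, 8192) */
	printf("off=%llu plen=%llu range=[%llu, %llu)\n",
	       off, plen, pos, pos + plen);
	return 0;
}

Only this first unaligned portion is special-cased in the hunk below; the rest of the range falls through to the existing dirty-check and flush path.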
@@ -1410,12 +1414,28 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 	 * mapping converts on writeback completion and so must be zeroed.
 	 *
 	 * The simplest way to deal with this across a range is to flush
-	 * pagecache and process the updated mappings. To avoid an unconditional
-	 * flush, check pagecache state and only flush if dirty and the fs
-	 * returns a mapping that might convert on writeback.
+	 * pagecache and process the updated mappings. To avoid excessive
+	 * flushing on partial eof zeroing, special case it to zero the
+	 * unaligned start portion if already dirty in pagecache.
+	 */
+	if (off &&
+	    filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
+		iter.len = plen;
+		while ((ret = iomap_iter(&iter, ops)) > 0)
+			iter.processed = iomap_zero_iter(&iter, did_zero);
+
+		iter.len = len - (iter.pos - pos);
+		if (ret || !iter.len)
+			return ret;
+	}
+
+	/*
+	 * To avoid an unconditional flush, check pagecache state and only flush
+	 * if dirty and the fs returns a mapping that might convert on
+	 * writeback.
 	 */
 	range_dirty = filemap_range_needs_writeback(inode->i_mapping,
-			pos, pos + len - 1);
+			iter.pos, iter.pos + iter.len - 1);
 	while ((ret = iomap_iter(&iter, ops)) > 0) {
 		const struct iomap *srcmap = iomap_iter_srcmap(&iter);