@@ -1092,7 +1092,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
* ioend after this.
*/
static void
-iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+iomap_finish_ioend(struct iomap_ioend *ioend, int error, bool atomic)
{
struct inode *inode = ioend->io_inode;
struct bio *bio = &ioend->io_inline_bio;
@@ -1115,8 +1115,11 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
next = bio->bi_private;
/* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bv, bio, iter_all)
+ bio_for_each_segment_all(bv, bio, iter_all) {
iomap_finish_page_writeback(inode, bv->bv_page, error);
+ if (!atomic)
+ cond_resched();
+ }
bio_put(bio);
}
/* The ioend has been freed by bio_put() */
@@ -1129,17 +1132,17 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
}
void
-iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+iomap_finish_ioends(struct iomap_ioend *ioend, int error, bool atomic)
{
struct list_head tmp;
list_replace_init(&ioend->io_list, &tmp);
- iomap_finish_ioend(ioend, error);
+ iomap_finish_ioend(ioend, error, atomic);
while (!list_empty(&tmp)) {
ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
list_del_init(&ioend->io_list);
- iomap_finish_ioend(ioend, error);
+ iomap_finish_ioend(ioend, error, atomic);
}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
@@ -1208,7 +1211,7 @@ static void iomap_writepage_end_bio(struct bio *bio)
{
struct iomap_ioend *ioend = bio->bi_private;
- iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
+ iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status), true);
}
/*
@@ -188,7 +188,7 @@ xfs_end_ioend(
done:
if (ioend->io_private)
error = xfs_setfilesize_ioend(ioend, error);
- iomap_finish_ioends(ioend, error);
+ iomap_finish_ioends(ioend, error, false);
memalloc_nofs_restore(nofs_flag);
}
@@ -229,7 +229,7 @@ struct iomap_writepage_ctx {
const struct iomap_writeback_ops *ops;
};
-void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error, bool atomic);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
struct list_head *more_ioends,
void (*merge_private)(struct iomap_ioend *ioend,
The iomap ioend mechanism has the ability to construct very large,
contiguous bios and/or bio chains. This has been reported to lead to
soft lockup warnings in bio completion due to the amount of page
processing that occurs. Update the ioend completion path with a
parameter to indicate atomic context and insert a cond_resched() call
to avoid soft lockups in either scenario.

Signed-off-by: Brian Foster <bfoster@redhat.com>
---
 fs/iomap/buffered-io.c | 15 +++++++++------
 fs/xfs/xfs_aops.c      |  2 +-
 include/linux/iomap.h  |  2 +-
 3 files changed, 11 insertions(+), 8 deletions(-)