--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -501,9 +501,6 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
u64 start, u64 end, int *page_started,
unsigned long *nr_written, struct writeback_control *wbc);
int btrfs_writepage_cow_fixup(struct page *page);
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
- struct page *page, u64 start,
- u64 end, bool uptodate);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -473,17 +473,15 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
struct btrfs_inode *inode;
const bool uptodate = (err == 0);
int ret = 0;
+ u32 len = end + 1 - start;
+ ASSERT(end + 1 - start <= U32_MAX);
ASSERT(page && page->mapping);
inode = BTRFS_I(page->mapping->host);
- btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
+ btrfs_mark_ordered_io_finished(inode, page, start, len, uptodate);
if (!uptodate) {
const struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u32 len;
-
- ASSERT(end + 1 - start <= U32_MAX);
- len = end + 1 - start;
btrfs_page_clear_uptodate(fs_info, page, start, len);
ret = err < 0 ? err : -EIO;
@@ -1328,6 +1326,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
bio_ctrl->end_io_func = end_bio_extent_writepage;
while (cur <= end) {
+ u32 len = end - cur + 1;
u64 disk_bytenr;
u64 em_end;
u64 dirty_range_start = cur;
@@ -1335,8 +1334,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
u32 iosize;
if (cur >= i_size) {
- btrfs_writepage_endio_finish_ordered(inode, page, cur,
- end, true);
+ btrfs_mark_ordered_io_finished(inode, page, cur, len,
+ true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
@@ -1345,7 +1344,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
* writeback the sectors with subpage dirty bits,
* causing writeback without ordered extent.
*/
- btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
+ btrfs_page_clear_dirty(fs_info, page, cur, len);
break;
}
@@ -1356,7 +1355,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
continue;
}
- em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
+ em = btrfs_get_extent(inode, NULL, 0, cur, len);
if (IS_ERR(em)) {
ret = PTR_ERR_OR_ZERO(em);
goto out_error;
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3384,15 +3384,6 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
return btrfs_finish_one_ordered(ordered);
}
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
- struct page *page, u64 start,
- u64 end, bool uptodate)
-{
- trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
-
- btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);
-}
-
/*
* Verify the checksum for a single sector without any extra action that depend
* on the type of I/O.
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -410,6 +410,10 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
unsigned long flags;
u64 cur = file_offset;
+ trace_btrfs_writepage_end_io_hook(inode, file_offset,
+ file_offset + num_bytes - 1,
+ uptodate);
+
spin_lock_irqsave(&tree->lock, flags);
while (cur < file_offset + num_bytes) {
u64 entry_end;
btrfs_writepage_endio_finish_ordered is a small wrapper around
btrfs_mark_ordered_io_finished that just changes the argument passing
slightly, and adds a tracepoint.

Move the tracepoint to btrfs_mark_ordered_io_finished, which means it
now also covers the error handling in btrfs_cleanup_ordered_extent, and
switch all callers to just call btrfs_mark_ordered_io_finished directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/btrfs_inode.h  |  3 ---
 fs/btrfs/extent_io.c    | 17 ++++++++---------
 fs/btrfs/inode.c        |  9 ---------
 fs/btrfs/ordered-data.c |  4 ++++
 4 files changed, 12 insertions(+), 21 deletions(-)
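
For readers skimming the mechanical conversion: the removed wrapper accepted an
inclusive end offset and translated it into a byte count before calling
btrfs_mark_ordered_io_finished, so each converted call site now open-codes that
translation (len = end + 1 - start, asserted to fit in a u32), and the
tracepoint fires inside btrfs_mark_ordered_io_finished itself. Below is a
minimal userspace sketch of that translation only, not kernel code;
mark_ordered_io_finished() is a hypothetical stand-in for the real helper.

/*
 * Illustrative userspace sketch (not kernel code): the inclusive-end to
 * byte-count translation that the converted call sites now open-code.
 * mark_ordered_io_finished() is a hypothetical stand-in for
 * btrfs_mark_ordered_io_finished(), which now also emits the tracepoint.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void mark_ordered_io_finished(uint64_t file_offset, uint32_t num_bytes,
				     bool uptodate)
{
	printf("finish ordered io: offset=%llu len=%u uptodate=%d\n",
	       (unsigned long long)file_offset, (unsigned)num_bytes, uptodate);
}

int main(void)
{
	/* Inclusive [start, end] range, as the removed wrapper took it. */
	uint64_t start = 0, end = 4095;
	/* The translation now done inline at each call site. */
	uint64_t len = end + 1 - start;

	/* Mirrors ASSERT(end + 1 - start <= U32_MAX) in end_extent_writepage(). */
	assert(len <= UINT32_MAX);

	mark_ordered_io_finished(start, (uint32_t)len, true);
	return 0;
}

Compiling and running the sketch prints the translated (offset, length) pair
for a one-page range.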