@@ -1610,6 +1610,10 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
curr_sample_pos = 0;
while (index < index_end) {
page = find_get_page(inode->i_mapping, index);
+ if (!page) {
+ index++;
+ continue;
+ }
in_data = kmap_local_page(page);
/* Handle case where the start is not aligned to PAGE_SIZE */
i = start % PAGE_SIZE;
@@ -2264,8 +2264,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, nr_written);
- } else if (!btrfs_inode_can_compress(inode) ||
- !inode_need_compress(inode, start, end)) {
+ } else if (!test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags)) {
if (zoned)
ret = run_delalloc_zoned(inode, locked_page, start, end,
page_started, nr_written);
@@ -2273,7 +2272,6 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1, NULL);
} else {
- set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
ret = cow_file_range_async(inode, wbc, locked_page, start, end,
page_started, nr_written);
}
@@ -8257,9 +8255,14 @@ static int btrfs_writepages(struct address_space *mapping,
*/
async_wb = btrfs_inode_can_compress(inode) &&
inode_need_compress(inode, start, end);
- if (!async_wb)
+
+ if (async_wb)
+ set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
+ else
lock_extent(&inode->io_tree, start, end, &cached);
+
ret = extent_writepages(mapping, wbc);
+
if (!async_wb)
unlock_extent(&inode->io_tree, start, end, &cached);
This sets the async bit early in the writeback path and uses it to decide whether the range should be written asynchronously. Since some pages could be missing from the page cache, check whether the page returned by find_get_page() is NULL while collecting the compression heuristic sample. Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com> --- fs/btrfs/compression.c | 4 ++++ fs/btrfs/inode.c | 11 +++++++---- 2 files changed, 11 insertions(+), 4 deletions(-)