@@ -832,7 +832,7 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
  * are written in the same order that the flusher thread sent them
  * down.
  */
-static noinline int compress_file_range(struct async_chunk *async_chunk)
+static noinline void compress_file_range(struct async_chunk *async_chunk)
 {
 	struct btrfs_inode *inode = async_chunk->inode;
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -850,7 +850,6 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
 	int i;
 	int will_compress;
 	int compress_type = fs_info->compress_type;
-	int compressed_extents = 0;
 	int redirty = 0;
 
 	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
@@ -1027,7 +1026,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
 				}
 				kfree(pages);
 			}
-			return 0;
+			return;
 		}
 	}
 
@@ -1046,8 +1045,6 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
 		 */
 		total_in = round_up(total_in, fs_info->sectorsize);
 		if (total_compressed + blocksize <= total_in) {
-			compressed_extents++;
-
 			/*
 			 * The async work queues will take care of doing actual
 			 * allocation on disk for these compressed pages, and
@@ -1063,7 +1060,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
 				cond_resched();
 				goto again;
 			}
-			return compressed_extents;
+			return;
 		}
 	}
 	if (pages) {
@@ -1104,9 +1101,6 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
 		extent_range_redirty_for_io(&inode->vfs_inode, start, end);
 	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
 			 BTRFS_COMPRESS_NONE);
-	compressed_extents++;
-
-	return compressed_extents;
 }
 
 static void free_async_extent_pages(struct async_extent *async_extent)
@@ -1659,15 +1653,9 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 static noinline void async_cow_start(struct btrfs_work *work)
 {
 	struct async_chunk *async_chunk;
-	int compressed_extents;
 
 	async_chunk = container_of(work, struct async_chunk, work);
-
-	compressed_extents = compress_file_range(async_chunk);
-	if (compressed_extents == 0) {
-		btrfs_add_delayed_iput(async_chunk->inode);
-		async_chunk->inode = NULL;
-	}
+	compress_file_range(async_chunk);
 }
 
 /*
@@ -1704,8 +1692,7 @@ static noinline void async_cow_free(struct btrfs_work *work)
 	struct async_cow *async_cow;
 
 	async_chunk = container_of(work, struct async_chunk, work);
-	if (async_chunk->inode)
-		btrfs_add_delayed_iput(async_chunk->inode);
+	btrfs_add_delayed_iput(async_chunk->inode);
 	if (async_chunk->blkcg_css)
 		css_put(async_chunk->blkcg_css);
Now that the ->inode check isn't needed in submit_compressed_extents
any more, there is no reason to clear the field early.  Always keep the
inode around until the work item is finished and remove the special
casing, and the counting of compressed extents in compress_file_range.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/inode.c | 23 +++++------------------
 1 file changed, 5 insertions(+), 18 deletions(-)
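The safety argument above hinges on the work item lifecycle: the ->free
handler (async_cow_free) only runs after ->start (async_cow_start) and
submission have finished, so a single unconditional reference drop at
free time covers every path, including the ones that used to count zero
compressed extents.  A minimal user-space sketch of that ownership
pattern, assuming nothing beyond the ordering guarantee; fake_inode,
work_start and work_free are hypothetical stand-ins, not btrfs API:

/* Illustrative only: models the queue -> start -> free ordering, not btrfs. */
#include <stdio.h>
#include <stdlib.h>

struct fake_inode {
	int refcount;
};

struct fake_chunk {
	struct fake_inode *inode;	/* held for the work item's whole life */
};

static void fake_iput(struct fake_inode *inode)
{
	if (--inode->refcount == 0) {
		printf("last reference dropped, inode freed\n");
		free(inode);
	}
}

/* like async_cow_start: compress, but never decide about the inode ref */
static void work_start(struct fake_chunk *chunk)
{
	printf("compressing range for inode %p\n", (void *)chunk->inode);
}

/* like async_cow_free: always runs last, so one unconditional drop is safe */
static void work_free(struct fake_chunk *chunk)
{
	fake_iput(chunk->inode);
}

int main(void)
{
	struct fake_inode *inode = calloc(1, sizeof(*inode));
	struct fake_chunk chunk;

	if (!inode)
		return 1;
	inode->refcount = 1;	/* reference taken when the work is queued */
	chunk.inode = inode;

	work_start(&chunk);	/* may produce zero or more async extents */
	work_free(&chunk);	/* single drop replaces the old conditional */
	return 0;
}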