@@ -1092,17 +1092,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
* @bits: the bits to set in this range
* @clear_bits: the bits to clear in this range
* @cached_state: state that we're going to cache
- * @mask: the allocation mask
*
* This will go through and set bits for the given range. If any states exist
* already in this range they are set with the given bit and cleared of the
* clear_bits. This is only meant to be used by things that are mergeable, ie
* converting from say DELALLOC to DIRTY. This is not meant to be used with
* boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
*/
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, unsigned clear_bits,
- struct extent_state **cached_state, gfp_t mask)
+ struct extent_state **cached_state)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
@@ -1117,7 +1118,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
btrfs_debug_check_extent_io_range(tree, start, end);
again:
- if (!prealloc && gfpflags_allow_blocking(mask)) {
+ if (!prealloc) {
/*
* Best effort, don't worry if extent state allocation fails
* here for the first iteration. We might have a cached state
@@ -1125,7 +1126,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
* extent state allocations are needed. We'll only know this
* after locking the tree.
*/
- prealloc = alloc_extent_state(mask);
+ prealloc = alloc_extent_state(GFP_NOFS);
if (!prealloc && !first_iteration)
return -ENOMEM;
}
@@ -1286,15 +1287,14 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (start > end)
goto out;
spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
+ cond_resched();
/*
* If we used the preallocated state, try again here out of the
* locked section so we can avoid GFP_ATOMIC. No error checking
* as we might not need it in the end.
*/
if (!prealloc)
- prealloc = alloc_extent_state(mask);
+ prealloc = alloc_extent_state(GFP_NOFS);
first_iteration = false;
goto again;
@@ -287,7 +287,7 @@ static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, unsigned clear_bits,
- struct extent_state **cached_state, gfp_t mask);
+ struct extent_state **cached_state);
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
u64 end, struct extent_state **cached_state)
@@ -943,7 +943,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
err = convert_extent_bit(dirty_pages, start, end,
EXTENT_NEED_WAIT,
- mark, &cached_state, GFP_NOFS);
+ mark, &cached_state);
/*
* convert_extent_bit can return -ENOMEM, which is most of the
* time a temporary error. So when it happens, ignore the error
The single caller passes GFP_NOFS, so we can get rid of the
gfpflags_allow_blocking() checks: GFP_NOFS can block but does not
recurse into the filesystem through reclaim.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/extent_io.c   | 14 +++++++-------
 fs/btrfs/extent_io.h   |  2 +-
 fs/btrfs/transaction.c |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)
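
As a side note, here is a small userspace-compilable sketch (not part of the
patch) of why the removed gfpflags_allow_blocking() checks were always true
for this caller. The flag bit values below are placeholders chosen for
illustration only; the flag names and the helper's logic are paraphrased from
include/linux/gfp.h.

#include <assert.h>
#include <stdbool.h>

typedef unsigned int gfp_t;

/* Placeholder bit values; only the relationships between flags matter here. */
#define __GFP_IO		((gfp_t)0x01u)
#define __GFP_FS		((gfp_t)0x02u)
#define __GFP_DIRECT_RECLAIM	((gfp_t)0x04u)
#define __GFP_KSWAPD_RECLAIM	((gfp_t)0x08u)

#define __GFP_RECLAIM	(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)	/* note: no __GFP_FS */

/* Mirrors the kernel helper: blocking is allowed iff direct reclaim is. */
static bool gfpflags_allow_blocking(gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

int main(void)
{
	/* Always true: the removed checks could never take the non-blocking path. */
	assert(gfpflags_allow_blocking(GFP_NOFS));
	/* __GFP_FS is clear, so reclaim will not recurse into the filesystem. */
	assert(!(GFP_NOFS & __GFP_FS));
	return 0;
}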