@@ -1440,7 +1440,7 @@ static noinline int
 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 				size_t num_pages, loff_t pos,
 				size_t write_bytes,
-				u64 *lockstart, u64 *lockend,
+				u64 *lockstart, u64 *lockend, bool nowait,
 				struct extent_state **cached_state)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -1455,8 +1455,20 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 	if (start_pos < inode->vfs_inode.i_size) {
 		struct btrfs_ordered_extent *ordered;
 
-		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
+		if (nowait) {
+			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
+				for (i = 0; i < num_pages; i++) {
+					unlock_page(pages[i]);
+					put_page(pages[i]);
+				}
+
+				return -EWOULDBLOCK;
+			}
+		} else {
+			lock_extent_bits(&inode->io_tree, start_pos, last_pos,
 				cached_state);
+		}
+
 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
 						     last_pos - start_pos + 1);
 		if (ordered &&
@@ -1755,7 +1767,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		extents_locked = lock_and_cleanup_extent_if_need(
 				BTRFS_I(inode), pages,
 				num_pages, pos, write_bytes, &lockstart,
-				&lockend, &cached_state);
+				&lockend, false, &cached_state);
 		if (extents_locked < 0) {
 			if (extents_locked == -EAGAIN)
 				goto again;
This adds the nowait parameter to lock_and_cleanup_extent_if_need(). If
nowait is set, the extent is locked with try_lock_extent() instead of the
blocking lock_extent_bits(); when the lock cannot be taken immediately,
the already locked pages are unlocked and released and -EWOULDBLOCK is
returned to the caller.

Signed-off-by: Stefan Roesch <shr@fb.com>
---
 fs/btrfs/file.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
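
Note for readers following the series: the existing call site in
btrfs_buffered_write() keeps passing false for now; wiring the buffered
write path up to the new parameter is left to a later patch. As a rough,
illustrative sketch only (the IOCB_NOWAIT check, the "ret" variable and
the "break" out of the copy loop are assumptions about that follow-up,
not something this patch adds), a caller could do something like:

	/* Illustrative sketch only -- not part of this patch. */
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);

	extents_locked = lock_and_cleanup_extent_if_need(
			BTRFS_I(inode), pages,
			num_pages, pos, write_bytes, &lockstart,
			&lockend, nowait, &cached_state);
	if (extents_locked == -EWOULDBLOCK) {
		/*
		 * The extent range is contended; report -EAGAIN so the
		 * upper layers can retry the write in a blocking context.
		 */
		ret = -EAGAIN;
		break;
	}

Passing the flag explicitly, rather than re-checking iocb flags inside
the helper, keeps lock_and_cleanup_extent_if_need() usable from callers
that have no iocb at hand.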