diff mbox series

[RFC,17/31] btrfs: Introduce btrfs_iomap

Message ID 23b8bc9426ff2de7cf4345c21df8d0c9c36dffa6.1623567940.git.rgoldwyn@suse.com (mailing list archive)
State New, archived
Headers show
Series btrfs buffered iomap support | expand

Commit Message

Goldwyn Rodrigues June 13, 2021, 1:39 p.m. UTC
From: Goldwyn Rodrigues <rgoldwyn@suse.com>

A structure that will be used to transfer information from
iomap_begin() to iomap_end().

Move all locking information into btrfs_iomap.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/file.c | 44 ++++++++++++++++++++++++++++----------------
 1 file changed, 28 insertions(+), 16 deletions(-)

Comments

David Sterba June 16, 2021, 8:02 p.m. UTC | #1
On Sun, Jun 13, 2021 at 08:39:45AM -0500, Goldwyn Rodrigues wrote:
> From: Goldwyn Rodrigues <rgoldwyn@suse.com>
> 
> A structure which will be used to transfer information from
> iomap_begin() to iomap_end().
> 
> Move all locking information into btrfs_iomap.
> 
> Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
> ---
>  fs/btrfs/file.c | 44 ++++++++++++++++++++++++++++----------------
>  1 file changed, 28 insertions(+), 16 deletions(-)
> 
> diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
> index a94ab3c8da1d..a28435a6bb7e 100644
> --- a/fs/btrfs/file.c
> +++ b/fs/btrfs/file.c
> @@ -56,6 +56,15 @@ struct inode_defrag {
>  	int cycled;
>  };
>  
> +struct btrfs_iomap {
> +
> +	/* Locking */
> +	u64 lockstart;
> +	u64 lockend;
> +	struct extent_state *cached_state;
> +	int extents_locked;
> +};
> +
>  static int __compare_inode_defrag(struct inode_defrag *defrag1,
>  				  struct inode_defrag *defrag2)
>  {
> @@ -1599,14 +1608,13 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  	struct page **pages = NULL;
>  	struct extent_changeset *data_reserved = NULL;
>  	u64 release_bytes = 0;
> -	u64 lockstart;
> -	u64 lockend;
>  	size_t num_written = 0;
>  	int nrptrs;
>  	ssize_t ret;
>  	bool only_release_metadata = false;
>  	loff_t old_isize = i_size_read(inode);
>  	unsigned int ilock_flags = 0;
> +	struct btrfs_iomap *bi = NULL;
>  
>  	if (iocb->ki_flags & IOCB_NOWAIT)
>  		ilock_flags |= BTRFS_ILOCK_TRY;
> @@ -1634,6 +1642,12 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  		goto out;
>  	}
>  
> +	bi = kzalloc(sizeof(struct btrfs_iomap), GFP_NOFS);

This is adding an allocation per write. The struct btrfs_iomap is not
that big so it could be on stack, but as there are more members added to
it in further patches the size could become unsuitable for stack. OTOH
some optimization of the member layout can reduce the size; I haven't
looked closer.

> +	if (!bi) {
> +		ret = -ENOMEM;
> +		goto out;
> +	}
> +
>  	while (iov_iter_count(i) > 0) {
>  		struct extent_state *cached_state = NULL;
>  		size_t offset = offset_in_page(pos);
> @@ -1647,7 +1661,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  		size_t copied;
>  		size_t dirty_sectors;
>  		size_t num_sectors;
> -		int extents_locked = false;
>  
>  		/*
>  		 * Fault pages before locking them in prepare_pages
> @@ -1658,6 +1671,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  			break;
>  		}
>  
> +		bi->extents_locked = false;
>  		only_release_metadata = false;
>  		sector_offset = pos & (fs_info->sectorsize - 1);
>  
> @@ -1698,11 +1712,12 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  		release_bytes = reserve_bytes;
>  
>  		if (pos < inode->i_size) {
> -			lockstart = round_down(pos, fs_info->sectorsize);
> -			lockend = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
> +			bi->lockstart = round_down(pos, fs_info->sectorsize);
> +			bi->lockend = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
>  			btrfs_lock_and_flush_ordered_range(BTRFS_I(inode),
> -					lockstart, lockend, &cached_state);
> -			extents_locked = true;
> +					bi->lockstart, bi->lockend,
> +					&cached_state);
> +			bi->extents_locked = true;
>  		}
>  
>  		/*
> @@ -1715,11 +1730,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  		if (ret) {
>  			btrfs_delalloc_release_extents(BTRFS_I(inode),
>  						       reserve_bytes);
> -			if (extents_locked)
> -				unlock_extent_cached(&BTRFS_I(inode)->io_tree,
> -						lockstart, lockend, &cached_state);
> -			else
> -				free_extent_state(cached_state);
>  			break;
>  		}
>  
> @@ -1777,12 +1787,13 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  		 * as delalloc through btrfs_dirty_pages(). Therefore free any
>  		 * possible cached extent state to avoid a memory leak.
>  		 */
> -		if (extents_locked)
> +		if (bi->extents_locked)
>  			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
> -					     lockstart, lockend, &cached_state);
> +					     bi->lockstart, bi->lockend,
> +					     &bi->cached_state);
>  		else
> -			free_extent_state(cached_state);
> -		extents_locked = false;
> +			free_extent_state(bi->cached_state);
> +		bi->extents_locked = false;
>  
>  		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
>  		if (ret) {
> @@ -1825,6 +1836,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>  		iocb->ki_pos += num_written;
>  	}
>  out:
> +	kfree(bi);

It's released in the same function, so that's the lifetime.

>  	btrfs_inode_unlock(inode, ilock_flags);
>  	return num_written ? num_written : ret;
>  }
> -- 
> 2.31.1
diff mbox series

Patch

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a94ab3c8da1d..a28435a6bb7e 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -56,6 +56,15 @@  struct inode_defrag {
 	int cycled;
 };
 
+struct btrfs_iomap {
+
+	/* Locking */
+	u64 lockstart;
+	u64 lockend;
+	struct extent_state *cached_state;
+	int extents_locked;
+};
+
 static int __compare_inode_defrag(struct inode_defrag *defrag1,
 				  struct inode_defrag *defrag2)
 {
@@ -1599,14 +1608,13 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 	struct page **pages = NULL;
 	struct extent_changeset *data_reserved = NULL;
 	u64 release_bytes = 0;
-	u64 lockstart;
-	u64 lockend;
 	size_t num_written = 0;
 	int nrptrs;
 	ssize_t ret;
 	bool only_release_metadata = false;
 	loff_t old_isize = i_size_read(inode);
 	unsigned int ilock_flags = 0;
+	struct btrfs_iomap *bi = NULL;
 
 	if (iocb->ki_flags & IOCB_NOWAIT)
 		ilock_flags |= BTRFS_ILOCK_TRY;
@@ -1634,6 +1642,12 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		goto out;
 	}
 
+	bi = kzalloc(sizeof(struct btrfs_iomap), GFP_NOFS);
+	if (!bi) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	while (iov_iter_count(i) > 0) {
 		struct extent_state *cached_state = NULL;
 		size_t offset = offset_in_page(pos);
@@ -1647,7 +1661,6 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		size_t copied;
 		size_t dirty_sectors;
 		size_t num_sectors;
-		int extents_locked = false;
 
 		/*
 		 * Fault pages before locking them in prepare_pages
@@ -1658,6 +1671,7 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 			break;
 		}
 
+		bi->extents_locked = false;
 		only_release_metadata = false;
 		sector_offset = pos & (fs_info->sectorsize - 1);
 
@@ -1698,11 +1712,12 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		release_bytes = reserve_bytes;
 
 		if (pos < inode->i_size) {
-			lockstart = round_down(pos, fs_info->sectorsize);
-			lockend = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
+			bi->lockstart = round_down(pos, fs_info->sectorsize);
+			bi->lockend = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
 			btrfs_lock_and_flush_ordered_range(BTRFS_I(inode),
-					lockstart, lockend, &cached_state);
-			extents_locked = true;
+					bi->lockstart, bi->lockend,
+					&cached_state);
+			bi->extents_locked = true;
 		}
 
 		/*
@@ -1715,11 +1730,6 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		if (ret) {
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
 						       reserve_bytes);
-			if (extents_locked)
-				unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-						lockstart, lockend, &cached_state);
-			else
-				free_extent_state(cached_state);
 			break;
 		}
 
@@ -1777,12 +1787,13 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		 * as delalloc through btrfs_dirty_pages(). Therefore free any
 		 * possible cached extent state to avoid a memory leak.
 		 */
-		if (extents_locked)
+		if (bi->extents_locked)
 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     lockstart, lockend, &cached_state);
+					     bi->lockstart, bi->lockend,
+					     &bi->cached_state);
 		else
-			free_extent_state(cached_state);
-		extents_locked = false;
+			free_extent_state(bi->cached_state);
+		bi->extents_locked = false;
 
 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
 		if (ret) {
@@ -1825,6 +1836,7 @@  static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		iocb->ki_pos += num_written;
 	}
 out:
+	kfree(bi);
 	btrfs_inode_unlock(inode, ilock_flags);
 	return num_written ? num_written : ret;
 }