
[02/13] btrfs: zoned: revive max_zone_append_bytes

Message ID 687ec8ab8c61a9972d6936cdf189dc5756299051.1656909695.git.naohiro.aota@wdc.com (mailing list archive)
State New, archived
Series btrfs: zoned: fix active zone tracking issues

Commit Message

Naohiro Aota July 4, 2022, 4:58 a.m. UTC
This patch is basically a revert of commit 5a80d1c6a270 ("btrfs: zoned:
remove max_zone_append_size logic"), but without the unnecessary ASSERT
and check. The max_zone_append_size will be used in later commits as a
hint to estimate the number of extents needed to cover a
delalloc/writeback region.

The size of a ZONE APPEND bio is also limited by queue_max_segments(),
so this commit takes that into account when calculating
max_zone_append_size. Technically, a bio can be larger than
queue_max_segments() * PAGE_SIZE if the pages are physically
contiguous. But it is still safe to use queue_max_segments() * PAGE_SIZE
as an upper limit of an extent size when calculating the number of
extents needed to write data: a smaller limit can only over-estimate
the number of extents, never under-estimate it.
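
As a rough illustration (not part of this patch; the helper below and
its name are hypothetical), such a hint permits an estimate along these
lines, dividing a writeback range by the per-filesystem limit:

static u32 btrfs_estimate_zoned_extents(u64 range_len, u64 max_zone_append_size)
{
	/*
	 * With no reported limit, fall back to a single extent. Otherwise
	 * assume every extent is at most max_zone_append_size bytes. Since
	 * the limit derived from bdev_max_segments() * PAGE_SIZE may be
	 * smaller than the largest bio the device actually accepts, this
	 * can only over-estimate the number of extents, which is the safe
	 * direction for a hint.
	 */
	if (!max_zone_append_size)
		return 1;

	return DIV_ROUND_UP(range_len, max_zone_append_size);
}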

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/ctree.h |  2 ++
 fs/btrfs/zoned.c | 10 ++++++++++
 fs/btrfs/zoned.h |  1 +
 3 files changed, 13 insertions(+)

Comments

Johannes Thumshirn July 4, 2022, 7:57 a.m. UTC | #1
Looks good,
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Christoph Hellwig July 4, 2022, 8:24 a.m. UTC | #2
> diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
> index 7a0f8fa44800..271b8b8fd4d0 100644
> --- a/fs/btrfs/zoned.c
> +++ b/fs/btrfs/zoned.c
> @@ -415,6 +415,9 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
>  	nr_sectors = bdev_nr_sectors(bdev);
>  	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
>  	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
> +	zone_info->max_zone_append_size =
> +		min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
> +		      (u64)bdev_max_segments(bdev) << PAGE_SHIFT);

This assumes each segment is just page sized, so you probably want to
document how you arrived at that assumption.
Johannes Thumshirn July 4, 2022, 9:33 a.m. UTC | #3
> @@ -723,6 +732,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
>  	}
>  
>  	fs_info->zone_size = zone_size;
> +	fs_info->max_zone_append_size = max_zone_append_size;
>  	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
>  
>  	/*

Thinking a bit more about this, this needs to be the min() of all
max_zone_append_size values of the underlying devices, because zoned
btrfs already supports multiple devices on a single FS.

Sorry for not noticing this earlier.
Christoph Hellwig July 4, 2022, 11:54 a.m. UTC | #4
On Mon, Jul 04, 2022 at 09:33:32AM +0000, Johannes Thumshirn wrote:
> > @@ -723,6 +732,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
> >  	}
> >  
> >  	fs_info->zone_size = zone_size;
> > +	fs_info->max_zone_append_size = max_zone_append_size;
> >  	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
> >  
> >  	/*
> 
> Thinking a bit more about this, this needs to be the min() of all
> max_zone_append_size values of the underlying devices, because zoned
> btrfs already supports multiple devices on a single FS.

Yes.
Naohiro Aota July 4, 2022, 1:24 p.m. UTC | #5
On Mon, Jul 04, 2022 at 04:54:44AM -0700, Christoph Hellwig wrote:
> On Mon, Jul 04, 2022 at 09:33:32AM +0000, Johannes Thumshirn wrote:
> > > @@ -723,6 +732,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
> > >  	}
> > >  
> > >  	fs_info->zone_size = zone_size;
> > > +	fs_info->max_zone_append_size = max_zone_append_size;
> > >  	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
> > >  
> > >  	/*
> > 
> > Thinking a bit more about this, this needs to be the min() of all
> > max_zone_append_size values of the underlying devices, because zoned
> > btrfs already supports multiple devices on a single FS.
> 
> Yes.

That min() is already done by the hunk above this one (the per-device
comparison added in btrfs_check_zoned_mode()).
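
For reference, the per-device merge in that hunk behaves like a rolling
min() that treats 0 as "no limit reported"; a hypothetical standalone
form of the same logic:

static u64 merge_zone_append_limit(u64 fs_limit, u64 dev_limit)
{
	/* A device reporting 0 places no constraint on the fs-wide value. */
	if (!dev_limit)
		return fs_limit;

	/* The first non-zero value seen, or a stricter (smaller) one, wins. */
	if (!fs_limit || dev_limit < fs_limit)
		return dev_limit;

	return fs_limit;
}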

Patch

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4e2569f84aab..e4879912c475 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1071,6 +1071,8 @@  struct btrfs_fs_info {
 	 */
 	u64 zone_size;
 
+	/* Max size to emit ZONE_APPEND write command */
+	u64 max_zone_append_size;
 	struct mutex zoned_meta_io_lock;
 	spinlock_t treelog_bg_lock;
 	u64 treelog_bg;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 7a0f8fa44800..271b8b8fd4d0 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -415,6 +415,9 @@  int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
 	nr_sectors = bdev_nr_sectors(bdev);
 	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
 	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
+	zone_info->max_zone_append_size =
+		min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
+		      (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
 	if (!IS_ALIGNED(nr_sectors, zone_sectors))
 		zone_info->nr_zones++;
 
@@ -640,6 +643,7 @@  int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
 	u64 zoned_devices = 0;
 	u64 nr_devices = 0;
 	u64 zone_size = 0;
+	u64 max_zone_append_size = 0;
 	const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
 	int ret = 0;
 
@@ -674,6 +678,11 @@  int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
 				ret = -EINVAL;
 				goto out;
 			}
+			if (!max_zone_append_size ||
+			    (zone_info->max_zone_append_size &&
+			     zone_info->max_zone_append_size < max_zone_append_size))
+				max_zone_append_size =
+					zone_info->max_zone_append_size;
 		}
 		nr_devices++;
 	}
@@ -723,6 +732,7 @@  int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
 	}
 
 	fs_info->zone_size = zone_size;
+	fs_info->max_zone_append_size = max_zone_append_size;
 	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
 
 	/*
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 6b2eec99162b..9caeab07fd38 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -19,6 +19,7 @@  struct btrfs_zoned_device_info {
 	 */
 	u64 zone_size;
 	u8  zone_size_shift;
+	u64 max_zone_append_size;
 	u32 nr_zones;
 	unsigned int max_active_zones;
 	atomic_t active_zones_left;