@@ -67,7 +67,7 @@ int discard_blocks(int fd, u64 start, u64 len)
return 0;
}
-static int zero_blocks(int fd, off_t start, size_t len)
+int zero_blocks(int fd, off_t start, size_t len)
{
char *buf = malloc(len);
int ret = 0;
@@ -86,7 +86,8 @@ static int zero_blocks(int fd, off_t start, size_t len)
#define ZERO_DEV_BYTES SZ_2M
/* don't write outside the device by clamping the region to the device size */
-static int zero_dev_clamped(int fd, off_t start, ssize_t len, u64 dev_size)
+static int zero_dev_clamped(int fd, struct btrfs_zone_info *zinfo, off_t start,
+ ssize_t len, u64 dev_size)
{
off_t end = max(start, start + len);
@@ -99,6 +100,9 @@ static int zero_dev_clamped(int fd, off_t start, ssize_t len, u64 dev_size)
start = min_t(u64, start, dev_size);
end = min_t(u64, end, dev_size);
+ if (zinfo->model == ZONED_HOST_MANAGED)
+ return zero_zone_blocks(fd, zinfo, start, end - start);
+
return zero_blocks(fd, start, end - start);
}
@@ -206,12 +210,12 @@ int btrfs_prepare_device(int fd, const char *file, u64 *block_count_ret,
}
}
- ret = zero_dev_clamped(fd, 0, ZERO_DEV_BYTES, block_count);
+ ret = zero_dev_clamped(fd, &zinfo, 0, ZERO_DEV_BYTES, block_count);
for (i = 0 ; !ret && i < BTRFS_SUPER_MIRROR_MAX; i++)
- ret = zero_dev_clamped(fd, btrfs_sb_offset(i),
+ ret = zero_dev_clamped(fd, &zinfo, btrfs_sb_offset(i),
BTRFS_SUPER_INFO_SIZE, block_count);
if (!ret && (opflags & PREP_DEVICE_ZERO_END))
- ret = zero_dev_clamped(fd, block_count - ZERO_DEV_BYTES,
+ ret = zero_dev_clamped(fd, &zinfo, block_count - ZERO_DEV_BYTES,
ZERO_DEV_BYTES, block_count);
if (ret < 0) {
@@ -26,6 +26,7 @@
#define PREP_DEVICE_HMZONED (1U << 3)
int discard_blocks(int fd, u64 start, u64 len);
+int zero_blocks(int fd, off_t start, size_t len);
u64 get_partition_size(const char *dev);
u64 disk_size(const char *path);
u64 btrfs_device_size(int fd, struct stat *st);
@@ -243,3 +243,32 @@ int btrfs_discard_all_zones(int fd, struct btrfs_zone_info *zinfo)
return 0;
}
+
+int zero_zone_blocks(int fd, struct btrfs_zone_info *zinfo, off_t start,
+		 size_t len)
+{
+	size_t zone_len = zinfo->zone_size;
+	off_t ofst = start;
+	size_t count;
+	int ret;
+
+	/* Zero only conventional zones: a sequential write required zone
+	 * cannot be rewritten until it is reset, so it must be skipped. */
+	while (len > 0) {
+
+		/* Clamp each chunk to a single zone so the sequential-zone
+		 * check below applies to the whole chunk (zone size is a
+		 * power of two, as the masking relies on). */
+		count = min_t(size_t, len, zone_len);
+		if (count > zone_len - (ofst & (zone_len - 1)))
+			count = zone_len - (ofst & (zone_len - 1));
+
+		if (!zone_is_sequential(zinfo, ofst)) {
+			ret = zero_blocks(fd, ofst, count);
+			if (ret != 0)
+				return ret;
+		}
+
+		len -= count;
+		ofst += count;
+	}
+
+	return 0;
+}
@@ -57,6 +57,8 @@ int btrfs_get_zone_info(int fd, const char *file, bool hmzoned,
#ifdef BTRFS_ZONED
bool zone_is_sequential(struct btrfs_zone_info *zinfo, u64 bytenr);
int btrfs_discard_all_zones(int fd, struct btrfs_zone_info *zinfo);
+int zero_zone_blocks(int fd, struct btrfs_zone_info *zinfo, off_t start,
+ size_t len);
#else
static inline bool zone_is_sequential(struct btrfs_zone_info *zinfo,
u64 bytenr)
@@ -67,6 +69,11 @@ static inline int btrfs_discard_all_zones(int fd, struct btrfs_zone_info *zinfo)
{
return -EOPNOTSUPP;
}
+/* Stub for !BTRFS_ZONED builds; must be static inline (like the
+ * btrfs_discard_all_zones() stub above) to avoid unused-function
+ * warnings in every file that includes this header. */
+static inline int zero_zone_blocks(int fd, struct btrfs_zone_info *zinfo,
+				   off_t start, size_t len)
+{
+	return -EOPNOTSUPP;
+}
#endif /* BTRFS_ZONED */
#endif /* __BTRFS_HMZONED_H__ */
If we zero out a region in a sequential write required zone, we cannot write to that region again until the zone is reset. Thus, we must not zero out sequential write required zones. zero_dev_clamped() is modified to take the zone information, and it calls zero_zone_blocks() when the device is host managed, so that sequential write required zones are never written. Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com> --- common/device-utils.c | 14 +++++++++----- common/device-utils.h | 1 + common/hmzoned.c | 29 +++++++++++++++++++++++++++++ common/hmzoned.h | 7 +++++++ 4 files changed, 46 insertions(+), 5 deletions(-)