@@ -118,6 +118,14 @@ typedef struct BlockZoneDescriptor {
BlockZoneState state;
} BlockZoneDescriptor;
+/*
+ * Track the write pointer of each zone of the device, in bytes.
+ */
+typedef struct BlockZoneWps {
+ CoMutex colock;
+ uint64_t wp[];
+} BlockZoneWps;
+
typedef struct BlockDriverInfo {
/* in bytes, 0 if irrelevant */
int cluster_size;
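
BlockZoneWps ends in a C99 flexible array member, so the per-zone wp array is
allocated in a single block together with the mutex that protects it. A
minimal sketch of the intended allocation and lookup (nr_zones, offset and
zone_size are hypothetical stand-ins here; the real allocation happens in
raw_refresh_limits() below):

    BlockZoneWps *wps = g_malloc(sizeof(BlockZoneWps) +
                                 sizeof(uint64_t) * nr_zones);
    qemu_co_mutex_init(&wps->colock);
    /* wps->wp[i] caches the write pointer of zone i, in bytes */
    uint64_t wp = wps->wp[offset / zone_size];
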
@@ -240,6 +248,12 @@ typedef enum {
#define BDRV_SECTOR_BITS 9
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
+/*
+ * The most significant bit of the wp encodes the zone type:
+ * 0 for sequential write required/preferred (SWR/SWP) zones,
+ * 1 for conventional zones.
+ */
+#define BDRV_ZT_IS_CONV(wp) ((wp) & (1ULL << 63))
+
#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
INT_MAX >> BDRV_SECTOR_BITS)
#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
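
The tag leaves the low 63 bits of wp holding the byte offset of the write
pointer, which is ample for any real device. A short sketch of how the bit is
produced and consumed (both uses appear in the file-posix changes below):

    /* zone report: mark zone j as conventional */
    wps->wp[j] |= 1ULL << 63;

    /* write completion: only SWR/SWP zones have a wp to advance */
    if (!BDRV_ZT_IS_CONV(wps->wp[j]) && offset + bytes > wps->wp[j]) {
        wps->wp[j] = offset + bytes;
    }
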
@@ -890,6 +890,8 @@ typedef struct BlockLimits {
/* maximum number of active zones */
int64_t max_active_zones;
+
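+    /* write granularity in bytes, matching the device's physical block size */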
+ int64_t write_granularity;
} BlockLimits;
typedef struct BdrvOpBlocker BdrvOpBlocker;
@@ -1251,6 +1253,9 @@ struct BlockDriverState {
CoMutex bsc_modify_lock;
/* Always non-NULL, but must only be dereferenced under an RCU read guard */
BdrvBlockStatusCache *block_status_cache;
+
+    /* Array of write pointer locations, one per zone of the zoned device. */
+ BlockZoneWps *wps;
};
struct BlockBackendRootState {
@@ -1324,6 +1324,90 @@ static int hdev_get_max_segments(int fd, struct stat *st)
#endif
}
+#if defined(CONFIG_BLKZONED)
+/*
+ * If the reset_all flag is true, the wp of every zone whose state is not
+ * read-only or offline is reset to the start sector of that zone. Otherwise,
+ * the wp reported by the device is used as-is.
+ */
+static int get_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
+ unsigned int nrz, bool reset_all)
+{
+ struct blk_zone *blkz;
+ size_t rep_size;
+ uint64_t sector = offset >> BDRV_SECTOR_BITS;
+ BlockZoneWps *wps = bs->wps;
+ int j = offset / bs->bl.zone_size;
+ int ret, n = 0, i = 0;
+    g_autofree struct blk_zone_report *rep = NULL;
+
+    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
+    rep = g_malloc(rep_size);
+ blkz = (struct blk_zone *)(rep + 1);
+ while (n < nrz) {
+ memset(rep, 0, rep_size);
+ rep->sector = sector;
+ rep->nr_zones = nrz - n;
+
+ do {
+ ret = ioctl(fd, BLKREPORTZONE, rep);
+ } while (ret != 0 && errno == EINTR);
+ if (ret != 0) {
+ error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
+ fd, offset, errno);
+ return -errno;
+ }
+
+ if (!rep->nr_zones) {
+ break;
+ }
+
+ for (i = 0; i < rep->nr_zones; ++i, ++n, ++j) {
+ /*
+ * The wp tracking cares only about sequential writes required and
+ * sequential write preferred zones so that the wp can advance to
+ * the right location.
+ * Use the most significant bit of the wp location to indicate the
+ * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
+ */
+ if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ wps->wp[j] |= 1ULL << 63;
+ } else {
+                switch (blkz[i].cond) {
+ case BLK_ZONE_COND_FULL:
+ case BLK_ZONE_COND_READONLY:
+ /* Zone not writable */
+ wps->wp[j] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS;
+ break;
+ case BLK_ZONE_COND_OFFLINE:
+ /* Zone not writable nor readable */
+                    wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS;
+ break;
+ default:
+ if (reset_all) {
+ wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS;
+ } else {
+ wps->wp[j] = blkz[i].wp << BDRV_SECTOR_BITS;
+ }
+ break;
+ }
+ }
+ }
+ sector = blkz[i - 1].start + blkz[i - 1].len;
+ }
+
+ return 0;
+}
+
+static void update_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
+ unsigned int nrz)
+{
+    if (get_zones_wp(bs, fd, offset, nrz, false) < 0) {
+ error_report("update zone wp failed");
+ }
+}
+#endif
+
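The helpers above get used in two patterns later in this patch: a refresh of
every zone's wp (at open time, or with reset_all=true after a whole-device
reset), and a one-zone refresh after a failed write:

    /* refresh all zones, keeping the wp values the device reports */
    ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, false);

    /* re-read the wp of the single zone containing 'offset' */
    update_zones_wp(bs, s->fd, offset, 1);
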
static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVRawState *s = bs->opaque;
@@ -1413,6 +1497,23 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
if (ret >= 0) {
bs->bl.max_active_zones = ret;
}
+
+ ret = get_sysfs_long_val(&st, "physical_block_size");
+ if (ret >= 0) {
+ bs->bl.write_granularity = ret;
+ }
+
+    /*
+     * refresh_limits() may be called repeatedly, so free any previously
+     * allocated wp array before reallocating it.
+     */
+    g_free(bs->wps);
+    bs->wps = g_malloc(sizeof(BlockZoneWps) +
+                       sizeof(uint64_t) * bs->bl.nr_zones);
+    ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, false);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "report wps failed");
+        g_free(bs->wps);
+        bs->wps = NULL;
+        return;
+    }
+    qemu_co_mutex_init(&bs->wps->colock);
return;
}
out:
@@ -2338,9 +2439,15 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData acb;
+ int ret;
if (fd_open(bs) < 0)
return -EIO;
+#if defined(CONFIG_BLKZONED)
+ if (type & QEMU_AIO_WRITE && bs->wps) {
+ qemu_co_mutex_lock(&bs->wps->colock);
+ }
+#endif
/*
* When using O_DIRECT, the request must be aligned to be able to use
@@ -2354,14 +2461,16 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
} else if (s->use_linux_io_uring) {
LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
assert(qiov->size == bytes);
- return luring_co_submit(bs, aio, s->fd, offset, qiov, type);
+ ret = luring_co_submit(bs, aio, s->fd, offset, qiov, type);
+ goto out;
#endif
#ifdef CONFIG_LINUX_AIO
} else if (s->use_linux_aio) {
LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
assert(qiov->size == bytes);
- return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
+ ret = laio_co_submit(bs, aio, s->fd, offset, qiov, type,
s->aio_max_batch);
+ goto out;
#endif
}
@@ -2378,7 +2487,32 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
};
assert(qiov->size == bytes);
- return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
+ ret = raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
+
+out:
+#if defined(CONFIG_BLKZONED)
+{
+    BlockZoneWps *wps = bs->wps;
+    if (ret == 0) {
+        if (type & QEMU_AIO_WRITE && wps && bs->bl.zone_size) {
+            uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
+            if (!BDRV_ZT_IS_CONV(*wp)) {
+                /* Advance the wp if needed */
+                if (offset + bytes > *wp) {
+                    *wp = offset + bytes;
+                }
+            }
+        }
+    } else {
+        if (type & QEMU_AIO_WRITE) {
+            /* A failed write leaves the wp unknown: re-read it from the device */
+            update_zones_wp(bs, s->fd, offset, 1);
+        }
+    }
+
+    if (type & QEMU_AIO_WRITE && wps) {
+        qemu_co_mutex_unlock(&wps->colock);
+    }
+}
+#endif
+ return ret;
}
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
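
Taken together, the zoned-write path of raw_co_prw() now follows a
lock/submit/update/unlock discipline. A condensed, hypothetical rendering of
that flow, where do_submit_io() stands in for the three real submission
backends (io_uring, linux-aio, thread pool):

    static int coroutine_fn zoned_write_locked(BlockDriverState *bs, int fd,
                                               uint64_t offset, uint64_t bytes)
    {
        int ret;
        qemu_co_mutex_lock(&bs->wps->colock);
        ret = do_submit_io(fd, offset, bytes);
        uint64_t *wp = &bs->wps->wp[offset / bs->bl.zone_size];
        if (ret == 0 && !BDRV_ZT_IS_CONV(*wp) && offset + bytes > *wp) {
            *wp = offset + bytes;                /* advance the cached wp */
        } else if (ret < 0) {
            update_zones_wp(bs, fd, offset, 1);  /* resync from the device */
        }
        qemu_co_mutex_unlock(&bs->wps->colock);
        return ret;
    }

Holding colock across submission is what keeps concurrent writes from
advancing the cached wp out of order.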
@@ -2486,6 +2620,9 @@ static void raw_close(BlockDriverState *bs)
BDRVRawState *s = bs->opaque;
if (s->fd >= 0) {
+#if defined(CONFIG_BLKZONED)
+ g_free(bs->wps);
+#endif
qemu_close(s->fd);
s->fd = -1;
}
@@ -3283,6 +3420,7 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
const char *op_name;
unsigned long zo;
int ret;
+ BlockZoneWps *wps = bs->wps;
int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
zone_size = bs->bl.zone_size;
@@ -3300,6 +3438,15 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
return -EINVAL;
}
+ QEMU_LOCK_GUARD(&wps->colock);
+ uint32_t i = offset / bs->bl.zone_size;
+ uint32_t nrz = len / bs->bl.zone_size;
+ uint64_t *wp = &wps->wp[i];
+ if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
+ error_report("zone mgmt operations are not allowed for conventional zones");
+ return -EIO;
+ }
+
switch (op) {
case BLK_ZO_OPEN:
op_name = "BLKOPENZONE";
@@ -3337,7 +3484,27 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
len >> BDRV_SECTOR_BITS);
ret = raw_thread_pool_submit(bs, handle_aiocb_zone_mgmt, &acb);
if (ret != 0) {
+        update_zones_wp(bs, s->fd, offset, nrz);
error_report("ioctl %s failed %d", op_name, ret);
+ return ret;
+ }
+
+ if (zo == BLKRESETZONE && len == capacity) {
+        ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, true);
+        if (ret < 0) {
+            error_report("resetting wps failed");
+ return ret;
+ }
+    } else if (zo == BLKRESETZONE) {
+        for (uint32_t j = 0; j < nrz; ++j) {
+            wp[j] = offset + j * zone_size;
+        }
+    } else if (zo == BLKFINISHZONE) {
+        for (uint32_t j = 0; j < nrz; ++j) {
+            /*
+             * The zoned device allows the last zone to be smaller than
+             * the zone size.
+             */
+            wp[j] = MIN(offset + (j + 1) * zone_size, offset + len);
+        }
}
return ret;