| Message ID | 20240705164640.2247869-1-hch@lst.de (mailing list archive) |
|---|---|
| State | New |
| Series | nvme: implement ->get_unique_id |
On Fri, Jul 05, 2024 at 06:46:26PM +0200, Christoph Hellwig wrote:
> Implement the get_unique_id method to allow pNFS SCSI layout access to
> NVMe namespaces.
>
> This is the server side implementation of RFC 9561 "Using the Parallel
> NFS (pNFS) SCSI Layout to Access Non-Volatile Memory Express (NVMe)
> Storage Devices".
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

I am happy to see this. Fwiw:

Acked-by: Chuck Lever <chuck.lever@oracle.com>

I will connect with you offline to advise me on setting up a test
harness similar to what I have with iSCSI.
On Fri, Jul 05, 2024 at 01:15:32PM -0400, Chuck Lever wrote:
> I will connect with you offline to advise me on setting up a test
> harness similar to what I have with iSCSI.

Unfortunately there is nothing that works out of the box right now.
What I've done is to take this patch:

https://lore.kernel.org/linux-nvme/20240229031241.8692-1-kanie@linux.alibaba.com/

which still needs to address a few issues before we can merge it, and
then hacked the nvmet code to not autogenerate a UUID.  I'm looking
into a patch to disable UUID autogeneration, and hopefully we can
upstream the persistent reservation support for 6.12.  After that it
should be very similar to the iSCSI test setup.
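[Editor's note: for anyone trying to reproduce that setup before an upstream knob exists, the kind of local hack described above might look like the sketch below. It assumes the autogenerated UUID comes from the uuid_gen() call in nvmet_ns_alloc() in drivers/nvme/target/core.c; the hunk context is approximate, the intent being that a zeroed UUID is not reported in the namespace identification descriptor list (verify against the nvmet identify code). This is a test-only hack, not a proposed interface.]

--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
-	uuid_gen(&ns->uuid);
+	/*
+	 * Test-only hack: leave ns->uuid zeroed so no autogenerated UUID is
+	 * reported and the initiator only sees the NGUID/EUI-64 configured
+	 * through configfs (e.g. the namespace's device_nguid attribute).
+	 */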
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
On Fri, Jul 05, 2024 at 06:46:26PM +0200, Christoph Hellwig wrote:
> Implement the get_unique_id method to allow pNFS SCSI layout access to
> NVMe namespaces.
>
> This is the server side implementation of RFC 9561 "Using the Parallel
> NFS (pNFS) SCSI Layout to Access Non-Volatile Memory Express (NVMe)
> Storage Devices".

Thanks, applied to nvme-6.11.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 782090ce0bc10d..96e0879013b79d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2230,6 +2230,32 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
 	return ret;
 }
 
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+		enum blk_unique_id type)
+{
+	struct nvme_ns_ids *ids = &ns->head->ids;
+
+	if (type != BLK_UID_EUI64)
+		return -EINVAL;
+
+	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) {
+		memcpy(id, &ids->nguid, sizeof(ids->nguid));
+		return sizeof(ids->nguid);
+	}
+	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) {
+		memcpy(id, &ids->eui64, sizeof(ids->eui64));
+		return sizeof(ids->eui64);
+	}
+
+	return -EINVAL;
+}
+
+static int nvme_get_unique_id(struct gendisk *disk, u8 id[16],
+		enum blk_unique_id type)
+{
+	return nvme_ns_get_unique_id(disk->private_data, id, type);
+}
+
 #ifdef CONFIG_BLK_SED_OPAL
 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 		bool send)
@@ -2285,6 +2311,7 @@ const struct block_device_operations nvme_bdev_ops = {
 	.open		= nvme_open,
 	.release	= nvme_release,
 	.getgeo		= nvme_getgeo,
+	.get_unique_id	= nvme_get_unique_id,
 	.report_zones	= nvme_report_zones,
 	.pr_ops		= &nvme_pr_ops,
 };
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d8b6b4648eaff9..1aed93d792b610 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -427,6 +427,21 @@ static void nvme_ns_head_release(struct gendisk *disk)
 	nvme_put_ns_head(disk->private_data);
 }
 
+static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
+		enum blk_unique_id type)
+{
+	struct nvme_ns_head *head = disk->private_data;
+	struct nvme_ns *ns;
+	int srcu_idx, ret = -EWOULDBLOCK;
+
+	srcu_idx = srcu_read_lock(&head->srcu);
+	ns = nvme_find_path(head);
+	if (ns)
+		ret = nvme_ns_get_unique_id(ns, id, type);
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
+
 #ifdef CONFIG_BLK_DEV_ZONED
 static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data)
@@ -454,6 +469,7 @@ const struct block_device_operations nvme_ns_head_ops = {
 	.ioctl		= nvme_ns_head_ioctl,
 	.compat_ioctl	= blkdev_compat_ptr_ioctl,
 	.getgeo		= nvme_getgeo,
+	.get_unique_id	= nvme_ns_head_get_unique_id,
 	.report_zones	= nvme_ns_head_report_zones,
 	.pr_ops		= &nvme_pr_ops,
 };
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f3a41133ac3f97..1907fbc3f5dbb3 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1062,6 +1062,9 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
 }
 #endif /* CONFIG_NVME_MULTIPATH */
 
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+		enum blk_unique_id type);
+
 struct nvme_zone_info {
 	u64 zone_size;
 	unsigned int max_open_zones;
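[Editor's note: for readers coming from the NFS side, the new method is reached through struct block_device_operations on the namespace's gendisk. The minimal sketch below shows one way a pNFS SCSI-layout style exporter could consume it; the helper name and error codes are illustrative, only get_unique_id, enum blk_unique_id, and BLK_UID_EUI64 come from the kernel and this patch.]

#include <linux/blkdev.h>

/*
 * Illustrative helper (not from the patch): fetch a stable identifier for
 * building a pNFS SCSI-layout device designator.  With this patch applied,
 * NVMe returns the 16-byte NGUID if the namespace has one, otherwise the
 * 8-byte EUI-64; the return value is the identifier length in bytes.
 */
static int example_get_eui64(struct block_device *bdev, u8 id[16])
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int len;

	if (!ops->get_unique_id)
		return -EOPNOTSUPP;	/* driver offers no stable unique ID */

	len = ops->get_unique_id(bdev->bd_disk, id, BLK_UID_EUI64);
	if (len <= 0)
		return -ENXIO;		/* neither NGUID nor EUI-64 present */
	return len;
}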
Implement the get_unique_id method to allow pNFS SCSI layout access to
NVMe namespaces.

This is the server side implementation of RFC 9561 "Using the Parallel
NFS (pNFS) SCSI Layout to Access Non-Volatile Memory Express (NVMe)
Storage Devices".

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c      | 27 +++++++++++++++++++++++++++
 drivers/nvme/host/multipath.c | 16 ++++++++++++++++
 drivers/nvme/host/nvme.h      |  3 +++
 3 files changed, 46 insertions(+)