| | |
|---|---|
| Message ID | 20241007-dcd-type2-upstream-v4-9-c261ee6eeded@intel.com |
| State | Superseded |
| Series | DCD: Add support for Dynamic Capacity Devices (DCD) |
On Mon, 07 Oct 2024 18:16:15 -0500
ira.weiny@intel.com wrote:

> From: Navneet Singh <navneet.singh@intel.com>
>
> Until now region modes and decoder modes were equivalent in that both
> modes were either PMEM or RAM. The addition of Dynamic
> Capacity partitions defines up to 8 DC partitions per device.
>
> The region mode is thus no longer equivalent to the endpoint decoder
> mode. IOW the endpoint decoders may have modes of DC0-DC7 while the
> region mode is simply DC.
>
> Define a new region mode enumeration which applies to regions separate
> from the decoder mode. Adjust the code to process these modes
> independently.
>
> There is no equal to decoder mode dead in region modes. Avoid
> constructing regions with decoders which have been flagged as dead.
>
> Suggested-by: Jonathan Cameron <Jonathan.Cameron@Huawei.com>
> Signed-off-by: Navneet Singh <navneet.singh@intel.com>
> Co-developed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>

LGTM

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
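The commit message explains the motivation, but this patch itself only wires up the RAM and PMEM cases; the Dynamic Capacity handling lands later in the series. As a rough illustration of why a one-to-one decoder/region mode comparison no longer works, the compatibility helper introduced here could be extended along the following lines once DC modes exist. The `CXL_DECODER_DC0`..`CXL_DECODER_DC7` and `CXL_REGION_DC` enumerators below are assumed from the rest of the series and are not defined by this patch; this is a sketch, not the series' implementation.

```c
/*
 * Sketch only: CXL_DECODER_DC0..CXL_DECODER_DC7 and CXL_REGION_DC are
 * assumed from later patches in this series; this patch handles only
 * the RAM and PMEM cases.
 */
static bool cxl_modes_compatible(enum cxl_region_mode rmode,
                                 enum cxl_decoder_mode dmode)
{
        if (rmode == CXL_REGION_RAM && dmode == CXL_DECODER_RAM)
                return true;
        if (rmode == CXL_REGION_PMEM && dmode == CXL_DECODER_PMEM)
                return true;
        /* any of the eight DC decoder modes can back a single DC region */
        if (rmode == CXL_REGION_DC &&
            dmode >= CXL_DECODER_DC0 && dmode <= CXL_DECODER_DC7)
                return true;

        return false;
}
```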
On Mon, Oct 07, 2024 at 06:16:15PM -0500, ira.weiny@intel.com wrote:
> From: Navneet Singh <navneet.singh@intel.com>
>
> Until now region modes and decoder modes were equivalent in that both
> modes were either PMEM or RAM. The addition of Dynamic
> Capacity partitions defines up to 8 DC partitions per device.
>
> The region mode is thus no longer equivalent to the endpoint decoder
> mode. IOW the endpoint decoders may have modes of DC0-DC7 while the
> region mode is simply DC.
>
> Define a new region mode enumeration which applies to regions separate
> from the decoder mode. Adjust the code to process these modes
> independently.
>
> There is no equal to decoder mode dead in region modes. Avoid
> constructing regions with decoders which have been flagged as dead.
>
> Suggested-by: Jonathan Cameron <Jonathan.Cameron@Huawei.com>
> Signed-off-by: Navneet Singh <navneet.singh@intel.com>
> Co-developed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> Reviewed-by: Fan Ni <fan.ni@samsung.com>
> ---
> Changes:
> [iweiny: rebase]
> [Jonathan: remove dead code]
> [Jonathan: clarify commit message]
> ---
>  drivers/cxl/core/cdat.c   |  6 ++--
>  drivers/cxl/core/region.c | 75 ++++++++++++++++++++++++++++++++++-------------
>  drivers/cxl/cxl.h         | 26 ++++++++++++++--
>  3 files changed, 82 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
> index 438869df241a..bd50bb655741 100644
> --- a/drivers/cxl/core/cdat.c
> +++ b/drivers/cxl/core/cdat.c
> @@ -571,17 +571,17 @@ static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
>  }
>
>  static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
> -                                               enum cxl_decoder_mode mode)
> +                                               enum cxl_region_mode mode)
>  {
>         struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
>         struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
>         struct cxl_dpa_perf *perf;
>
>         switch (mode) {
> -       case CXL_DECODER_RAM:
> +       case CXL_REGION_RAM:
>                 perf = &mds->ram_perf;
>                 break;
> -       case CXL_DECODER_PMEM:
> +       case CXL_REGION_PMEM:
>                 perf = &mds->pmem_perf;
>                 break;
>         default:
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index e701e4b04032..f3a56003edc1 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -144,7 +144,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
>         rc = down_read_interruptible(&cxl_region_rwsem);
>         if (rc)
>                 return rc;
> -       if (cxlr->mode != CXL_DECODER_PMEM)
> +       if (cxlr->mode != CXL_REGION_PMEM)
>                 rc = sysfs_emit(buf, "\n");
>         else
>                 rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
> @@ -457,7 +457,7 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
>          * Support tooling that expects to find a 'uuid' attribute for all
>          * regions regardless of mode.
>          */
> -       if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
> +       if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_REGION_PMEM)
>                 return 0444;
>         return a->mode;
>  }
> @@ -620,7 +620,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
>  {
>         struct cxl_region *cxlr = to_cxl_region(dev);
>
> -       return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
> +       return sysfs_emit(buf, "%s\n", cxl_region_mode_name(cxlr->mode));
>  }
>  static DEVICE_ATTR_RO(mode);
>
> @@ -646,7 +646,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
>
>         /* ways, granularity and uuid (if PMEM) need to be set before HPA */
>         if (!p->interleave_ways || !p->interleave_granularity ||
> -           (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
> +           (cxlr->mode == CXL_REGION_PMEM && uuid_is_null(&p->uuid)))
>                 return -ENXIO;
>
>         div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
> @@ -1863,6 +1863,17 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
>         return rc;
>  }
>
> +static bool cxl_modes_compatible(enum cxl_region_mode rmode,
> +                                enum cxl_decoder_mode dmode)
> +{
> +       if (rmode == CXL_REGION_RAM && dmode == CXL_DECODER_RAM)
> +               return true;
> +       if (rmode == CXL_REGION_PMEM && dmode == CXL_DECODER_PMEM)
> +               return true;
> +
> +       return false;
> +}
> +
>  static int cxl_region_attach(struct cxl_region *cxlr,
>                              struct cxl_endpoint_decoder *cxled, int pos)
>  {
> @@ -1882,9 +1893,11 @@ static int cxl_region_attach(struct cxl_region *cxlr,
>                 return rc;
>         }
>
> -       if (cxled->mode != cxlr->mode) {
> -               dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
> -                       dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
> +       if (!cxl_modes_compatible(cxlr->mode, cxled->mode)) {
> +               dev_dbg(&cxlr->dev, "%s region mode: %s mismatch decoder: %s\n",
> +                       dev_name(&cxled->cxld.dev),
> +                       cxl_region_mode_name(cxlr->mode),
> +                       cxl_decoder_mode_name(cxled->mode));
>                 return -EINVAL;
>         }
>
> @@ -2446,7 +2459,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
>   * devm_cxl_add_region - Adds a region to a decoder
>   * @cxlrd: root decoder
>   * @id: memregion id to create, or memregion_free() on failure
> - * @mode: mode for the endpoint decoders of this region
> + * @mode: mode of this region
>   * @type: select whether this is an expander or accelerator (type-2 or type-3)
>   *
>   * This is the second step of region initialization. Regions exist within an
> @@ -2457,7 +2470,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
>   */
>  static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
>                                                int id,
> -                                              enum cxl_decoder_mode mode,
> +                                              enum cxl_region_mode mode,
>                                                enum cxl_decoder_type type)
>  {
>         struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
> @@ -2511,16 +2524,17 @@ static ssize_t create_ram_region_show(struct device *dev,
>  }
>
>  static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
> -                                         enum cxl_decoder_mode mode, int id)
> +                                         enum cxl_region_mode mode, int id)
>  {
>         int rc;
>
>         switch (mode) {
> -       case CXL_DECODER_RAM:
> -       case CXL_DECODER_PMEM:
> +       case CXL_REGION_RAM:
> +       case CXL_REGION_PMEM:
>                 break;
>         default:
> -               dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
> +               dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %s\n",
> +                       cxl_region_mode_name(mode));
>                 return ERR_PTR(-EINVAL);
>         }
>
> @@ -2548,7 +2562,7 @@ static ssize_t create_pmem_region_store(struct device *dev,
>         if (rc != 1)
>                 return -EINVAL;
>
> -       cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
> +       cxlr = __create_region(cxlrd, CXL_REGION_PMEM, id);
>         if (IS_ERR(cxlr))
>                 return PTR_ERR(cxlr);
>
> @@ -2568,7 +2582,7 @@ static ssize_t create_ram_region_store(struct device *dev,
>         if (rc != 1)
>                 return -EINVAL;
>
> -       cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
> +       cxlr = __create_region(cxlrd, CXL_REGION_RAM, id);
>         if (IS_ERR(cxlr))
>                 return PTR_ERR(cxlr);
>
> @@ -3215,6 +3229,22 @@ static int match_region_by_range(struct device *dev, void *data)
>         return rc;
>  }
>
> +static enum cxl_region_mode
> +cxl_decoder_to_region_mode(enum cxl_decoder_mode mode)
> +{
> +       switch (mode) {
> +       case CXL_DECODER_NONE:
> +               return CXL_REGION_NONE;
> +       case CXL_DECODER_RAM:
> +               return CXL_REGION_RAM;
> +       case CXL_DECODER_PMEM:
> +               return CXL_REGION_PMEM;
> +       case CXL_DECODER_MIXED:
> +       default:
> +               return CXL_REGION_MIXED;
> +       }
> +}
> +
>  /* Establish an empty region covering the given HPA range */
>  static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
>                                            struct cxl_endpoint_decoder *cxled)
> @@ -3223,12 +3253,17 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
>         struct cxl_port *port = cxlrd_to_port(cxlrd);
>         struct range *hpa = &cxled->cxld.hpa_range;
>         struct cxl_region_params *p;
> +       enum cxl_region_mode mode;
>         struct cxl_region *cxlr;
>         struct resource *res;
>         int rc;
>
> +       if (cxled->mode == CXL_DECODER_DEAD)
> +               return ERR_PTR(-EINVAL);
> +
> +       mode = cxl_decoder_to_region_mode(cxled->mode);
>         do {
> -               cxlr = __create_region(cxlrd, cxled->mode,
> +               cxlr = __create_region(cxlrd, mode,
>                                        atomic_read(&cxlrd->region_id));
>         } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
>
> @@ -3431,9 +3466,9 @@ static int cxl_region_probe(struct device *dev)
>                 return rc;
>
>         switch (cxlr->mode) {
> -       case CXL_DECODER_PMEM:
> +       case CXL_REGION_PMEM:
>                 return devm_cxl_add_pmem_region(cxlr);
> -       case CXL_DECODER_RAM:
> +       case CXL_REGION_RAM:
>                 /*
>                  * The region can not be manged by CXL if any portion of
>                  * it is already online as 'System RAM'
> @@ -3445,8 +3480,8 @@ static int cxl_region_probe(struct device *dev)
>                         return 0;
>                 return devm_cxl_add_dax_region(cxlr);
>         default:
> -               dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
> -                       cxlr->mode);
> +               dev_dbg(&cxlr->dev, "unsupported region mode: %s\n",
> +                       cxl_region_mode_name(cxlr->mode));
>                 return -ENXIO;
>         }
>  }
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index 0d8b810a51f0..5d74eb4ffab3 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -388,6 +388,27 @@ static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
>         return "mixed";
>  }
>
> +enum cxl_region_mode {
> +       CXL_REGION_NONE,
> +       CXL_REGION_RAM,
> +       CXL_REGION_PMEM,
> +       CXL_REGION_MIXED,
> +};
> +
> +static inline const char *cxl_region_mode_name(enum cxl_region_mode mode)
> +{
> +       static const char * const names[] = {
> +               [CXL_REGION_NONE] = "none",
> +               [CXL_REGION_RAM] = "ram",
> +               [CXL_REGION_PMEM] = "pmem",
> +               [CXL_REGION_MIXED] = "mixed",
> +       };
> +
> +       if (mode >= CXL_REGION_NONE && mode <= CXL_REGION_MIXED)
> +               return names[mode];
> +       return "mixed";
> +}
> +
>  /*
>   * Track whether this decoder is reserved for region autodiscovery, or
>   * free for userspace provisioning.
> @@ -515,7 +536,8 @@ struct cxl_region_params {
>   * struct cxl_region - CXL region
>   * @dev: This region's device
>   * @id: This region's id. Id is globally unique across all regions
> - * @mode: Endpoint decoder allocation / access mode
> + * @mode: Region mode which defines which endpoint decoder modes the region is
> + *        compatible with
>   * @type: Endpoint decoder target type
>   * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
>   * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
> @@ -528,7 +550,7 @@ struct cxl_region_params {
>  struct cxl_region {
>         struct device dev;
>         int id;
> -       enum cxl_decoder_mode mode;
> +       enum cxl_region_mode mode;
>         enum cxl_decoder_type type;
>         struct cxl_nvdimm_bridge *cxl_nvb;
>         struct cxl_pmem_region *cxlr_pmem;
>
> --
> 2.46.0
>
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 438869df241a..bd50bb655741 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -571,17 +571,17 @@ static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
 }
 
 static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
-                                               enum cxl_decoder_mode mode)
+                                               enum cxl_region_mode mode)
 {
         struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
         struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
         struct cxl_dpa_perf *perf;
 
         switch (mode) {
-        case CXL_DECODER_RAM:
+        case CXL_REGION_RAM:
                 perf = &mds->ram_perf;
                 break;
-        case CXL_DECODER_PMEM:
+        case CXL_REGION_PMEM:
                 perf = &mds->pmem_perf;
                 break;
         default:
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e701e4b04032..f3a56003edc1 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -144,7 +144,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
         rc = down_read_interruptible(&cxl_region_rwsem);
         if (rc)
                 return rc;
-        if (cxlr->mode != CXL_DECODER_PMEM)
+        if (cxlr->mode != CXL_REGION_PMEM)
                 rc = sysfs_emit(buf, "\n");
         else
                 rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
@@ -457,7 +457,7 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
          * Support tooling that expects to find a 'uuid' attribute for all
          * regions regardless of mode.
          */
-        if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+        if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_REGION_PMEM)
                 return 0444;
         return a->mode;
 }
@@ -620,7 +620,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
 {
         struct cxl_region *cxlr = to_cxl_region(dev);
 
-        return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
+        return sysfs_emit(buf, "%s\n", cxl_region_mode_name(cxlr->mode));
 }
 static DEVICE_ATTR_RO(mode);
 
@@ -646,7 +646,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
 
         /* ways, granularity and uuid (if PMEM) need to be set before HPA */
         if (!p->interleave_ways || !p->interleave_granularity ||
-            (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+            (cxlr->mode == CXL_REGION_PMEM && uuid_is_null(&p->uuid)))
                 return -ENXIO;
 
         div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
@@ -1863,6 +1863,17 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
         return rc;
 }
 
+static bool cxl_modes_compatible(enum cxl_region_mode rmode,
+                                 enum cxl_decoder_mode dmode)
+{
+        if (rmode == CXL_REGION_RAM && dmode == CXL_DECODER_RAM)
+                return true;
+        if (rmode == CXL_REGION_PMEM && dmode == CXL_DECODER_PMEM)
+                return true;
+
+        return false;
+}
+
 static int cxl_region_attach(struct cxl_region *cxlr,
                              struct cxl_endpoint_decoder *cxled, int pos)
 {
@@ -1882,9 +1893,11 @@ static int cxl_region_attach(struct cxl_region *cxlr,
                 return rc;
         }
 
-        if (cxled->mode != cxlr->mode) {
-                dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
-                        dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
+        if (!cxl_modes_compatible(cxlr->mode, cxled->mode)) {
+                dev_dbg(&cxlr->dev, "%s region mode: %s mismatch decoder: %s\n",
+                        dev_name(&cxled->cxld.dev),
+                        cxl_region_mode_name(cxlr->mode),
+                        cxl_decoder_mode_name(cxled->mode));
                 return -EINVAL;
         }
 
@@ -2446,7 +2459,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
  * devm_cxl_add_region - Adds a region to a decoder
  * @cxlrd: root decoder
  * @id: memregion id to create, or memregion_free() on failure
- * @mode: mode for the endpoint decoders of this region
+ * @mode: mode of this region
  * @type: select whether this is an expander or accelerator (type-2 or type-3)
  *
  * This is the second step of region initialization. Regions exist within an
@@ -2457,7 +2470,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
  */
 static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
                                               int id,
-                                              enum cxl_decoder_mode mode,
+                                              enum cxl_region_mode mode,
                                               enum cxl_decoder_type type)
 {
         struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
@@ -2511,16 +2524,17 @@ static ssize_t create_ram_region_show(struct device *dev,
 }
 
 static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
-                                          enum cxl_decoder_mode mode, int id)
+                                          enum cxl_region_mode mode, int id)
 {
         int rc;
 
         switch (mode) {
-        case CXL_DECODER_RAM:
-        case CXL_DECODER_PMEM:
+        case CXL_REGION_RAM:
+        case CXL_REGION_PMEM:
                 break;
         default:
-                dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+                dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %s\n",
+                        cxl_region_mode_name(mode));
                 return ERR_PTR(-EINVAL);
         }
 
@@ -2548,7 +2562,7 @@ static ssize_t create_pmem_region_store(struct device *dev,
         if (rc != 1)
                 return -EINVAL;
 
-        cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
+        cxlr = __create_region(cxlrd, CXL_REGION_PMEM, id);
         if (IS_ERR(cxlr))
                 return PTR_ERR(cxlr);
 
@@ -2568,7 +2582,7 @@ static ssize_t create_ram_region_store(struct device *dev,
         if (rc != 1)
                 return -EINVAL;
 
-        cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
+        cxlr = __create_region(cxlrd, CXL_REGION_RAM, id);
         if (IS_ERR(cxlr))
                 return PTR_ERR(cxlr);
 
@@ -3215,6 +3229,22 @@ static int match_region_by_range(struct device *dev, void *data)
         return rc;
 }
 
+static enum cxl_region_mode
+cxl_decoder_to_region_mode(enum cxl_decoder_mode mode)
+{
+        switch (mode) {
+        case CXL_DECODER_NONE:
+                return CXL_REGION_NONE;
+        case CXL_DECODER_RAM:
+                return CXL_REGION_RAM;
+        case CXL_DECODER_PMEM:
+                return CXL_REGION_PMEM;
+        case CXL_DECODER_MIXED:
+        default:
+                return CXL_REGION_MIXED;
+        }
+}
+
 /* Establish an empty region covering the given HPA range */
 static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
                                            struct cxl_endpoint_decoder *cxled)
@@ -3223,12 +3253,17 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
         struct cxl_port *port = cxlrd_to_port(cxlrd);
         struct range *hpa = &cxled->cxld.hpa_range;
         struct cxl_region_params *p;
+        enum cxl_region_mode mode;
         struct cxl_region *cxlr;
         struct resource *res;
         int rc;
 
+        if (cxled->mode == CXL_DECODER_DEAD)
+                return ERR_PTR(-EINVAL);
+
+        mode = cxl_decoder_to_region_mode(cxled->mode);
         do {
-                cxlr = __create_region(cxlrd, cxled->mode,
+                cxlr = __create_region(cxlrd, mode,
                                        atomic_read(&cxlrd->region_id));
         } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
 
@@ -3431,9 +3466,9 @@ static int cxl_region_probe(struct device *dev)
                 return rc;
 
         switch (cxlr->mode) {
-        case CXL_DECODER_PMEM:
+        case CXL_REGION_PMEM:
                 return devm_cxl_add_pmem_region(cxlr);
-        case CXL_DECODER_RAM:
+        case CXL_REGION_RAM:
                 /*
                  * The region can not be manged by CXL if any portion of
                  * it is already online as 'System RAM'
@@ -3445,8 +3480,8 @@ static int cxl_region_probe(struct device *dev)
                         return 0;
                 return devm_cxl_add_dax_region(cxlr);
         default:
-                dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
-                        cxlr->mode);
+                dev_dbg(&cxlr->dev, "unsupported region mode: %s\n",
+                        cxl_region_mode_name(cxlr->mode));
                 return -ENXIO;
         }
 }
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 0d8b810a51f0..5d74eb4ffab3 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -388,6 +388,27 @@ static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
         return "mixed";
 }
 
+enum cxl_region_mode {
+        CXL_REGION_NONE,
+        CXL_REGION_RAM,
+        CXL_REGION_PMEM,
+        CXL_REGION_MIXED,
+};
+
+static inline const char *cxl_region_mode_name(enum cxl_region_mode mode)
+{
+        static const char * const names[] = {
+                [CXL_REGION_NONE] = "none",
+                [CXL_REGION_RAM] = "ram",
+                [CXL_REGION_PMEM] = "pmem",
+                [CXL_REGION_MIXED] = "mixed",
+        };
+
+        if (mode >= CXL_REGION_NONE && mode <= CXL_REGION_MIXED)
+                return names[mode];
+        return "mixed";
+}
+
 /*
  * Track whether this decoder is reserved for region autodiscovery, or
  * free for userspace provisioning.
@@ -515,7 +536,8 @@ struct cxl_region_params {
  * struct cxl_region - CXL region
  * @dev: This region's device
  * @id: This region's id. Id is globally unique across all regions
- * @mode: Endpoint decoder allocation / access mode
+ * @mode: Region mode which defines which endpoint decoder modes the region is
+ *        compatible with
  * @type: Endpoint decoder target type
  * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
  * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
@@ -528,7 +550,7 @@ struct cxl_region_params {
 struct cxl_region {
         struct device dev;
         int id;
-        enum cxl_decoder_mode mode;
+        enum cxl_region_mode mode;
         enum cxl_decoder_type type;
         struct cxl_nvdimm_bridge *cxl_nvb;
         struct cxl_pmem_region *cxlr_pmem;
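To see the new mapping and naming helpers in isolation, the small standalone program below mirrors cxl_decoder_to_region_mode() and cxl_region_mode_name() from the diff above. It is purely illustrative and not part of the patch; the enum cxl_decoder_mode values are assumed from drivers/cxl/cxl.h rather than copied from this diff.

```c
/* Standalone sketch of the decoder-to-region mode mapping in this patch. */
#include <stdio.h>

/* Assumed from drivers/cxl/cxl.h; not part of this diff */
enum cxl_decoder_mode { CXL_DECODER_NONE, CXL_DECODER_RAM, CXL_DECODER_PMEM,
                        CXL_DECODER_MIXED, CXL_DECODER_DEAD };
/* Copied from the hunk adding enum cxl_region_mode above */
enum cxl_region_mode { CXL_REGION_NONE, CXL_REGION_RAM, CXL_REGION_PMEM,
                       CXL_REGION_MIXED };

/* Mirrors cxl_decoder_to_region_mode() from the diff */
static enum cxl_region_mode to_region_mode(enum cxl_decoder_mode mode)
{
        switch (mode) {
        case CXL_DECODER_NONE: return CXL_REGION_NONE;
        case CXL_DECODER_RAM:  return CXL_REGION_RAM;
        case CXL_DECODER_PMEM: return CXL_REGION_PMEM;
        default:               return CXL_REGION_MIXED;
        }
}

/* Mirrors cxl_region_mode_name() from the diff */
static const char *region_mode_name(enum cxl_region_mode mode)
{
        static const char * const names[] = { "none", "ram", "pmem", "mixed" };

        if (mode >= CXL_REGION_NONE && mode <= CXL_REGION_MIXED)
                return names[mode];
        return "mixed";
}

int main(void)
{
        /* RAM and PMEM decoders map to like-named region modes */
        printf("ram decoder  -> %s region\n",
               region_mode_name(to_region_mode(CXL_DECODER_RAM)));
        printf("pmem decoder -> %s region\n",
               region_mode_name(to_region_mode(CXL_DECODER_PMEM)));
        return 0;
}
```

Note that CXL_DECODER_DEAD has no region-mode equivalent, which is why construct_region() in the diff rejects dead decoders before performing this mapping.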