@@ -213,6 +213,22 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
+static int match_cxl_nvdimm(struct device *dev, void *data)
+{
+ return is_cxl_nvdimm(dev);
+}
+
+struct cxl_nvdimm *cxl_find_nvdimm(struct cxl_memdev *cxlmd)
+{
+ struct device *dev;
+
+ dev = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
+ if (!dev)
+ return NULL;
+ return to_cxl_nvdimm(dev);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm, CXL);
+
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm *cxl_nvd;
@@ -434,6 +434,7 @@ bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
+struct cxl_nvdimm *cxl_find_nvdimm(struct cxl_memdev *cxlmd);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
@@ -623,6 +623,55 @@ static int bind_region(struct cxl_region *region)
return rc;
}
+static int connect_to_libnvdimm(struct cxl_region *region)
+{
+ struct nd_region_desc ndr_desc;
+ struct cxl_nvdimm_bridge *nvb;
+ struct nd_region *ndr;
+ int rc = 0;
+
+ nvb = cxl_find_nvdimm_bridge(region->config.targets[0]);
+ if (!nvb)
+ return -ENXIO;
+
+ device_lock(&nvb->dev);
+ if (!nvb->nvdimm_bus) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+
+ ndr_desc.res = region->res;
+
+ ndr_desc.numa_node = phys_to_target_node(region->res->start);
+ ndr_desc.target_node = phys_to_target_node(region->res->start);
+ if (ndr_desc.numa_node == NUMA_NO_NODE) {
+ ndr_desc.numa_node =
+ memory_add_physaddr_to_nid(region->res->start);
+ dev_info(&region->dev,
+ "changing numa node from %d to %d for CXL region %pR",
+ NUMA_NO_NODE, ndr_desc.numa_node, region->res);
+ }
+ if (ndr_desc.target_node == NUMA_NO_NODE) {
+ ndr_desc.target_node = ndr_desc.numa_node;
+ dev_info(&region->dev,
+ "changing target node from %d to %d for CXL region %pR",
+ NUMA_NO_NODE, ndr_desc.target_node, region->res);
+ }
+
+ ndr = nvdimm_pmem_region_create(nvb->nvdimm_bus, &ndr_desc);
+ if (IS_ERR(ndr))
+ rc = PTR_ERR(ndr);
+ else
+ dev_set_drvdata(&region->dev, ndr);
+
+out:
+ device_unlock(&nvb->dev);
+ put_device(&nvb->dev);
+ return rc;
+}
+
static void region_unregister(void *dev)
{
struct cxl_region *region = to_cxl_region(dev);
@@ -704,6 +753,12 @@ static int cxl_region_probe(struct device *dev)
return ret;
}
+ ret = connect_to_libnvdimm(region);
+ if (ret) {
+ region_unregister(dev);
+ return ret;
+ }
+
region->active = true;
dev_info(dev, "Bound");
LIBNVDIMM supports the creation of regions for both persistent and volatile
memory ranges. The cxl_region driver handles the CXL side of region creation
but reuses LIBNVDIMM for interfacing with the rest of the kernel.

TODO: CXL regions can go away. As a result, the nd_region must also be torn
down.

TODO2: Handle mappings. LIBNVDIMM can be informed about which parts of which
devices contribute to a region and can validate whether or not the region is
configured properly. Doing this properly requires tracking allocations per
device.

Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
---
 drivers/cxl/core/pmem.c | 16 +++++++++++++
 drivers/cxl/cxl.h       |  1 +
 drivers/cxl/region.c    | 55 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 72 insertions(+)
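
A possible shape for the first TODO (tearing down the nd_region when the CXL
region goes away), sketched here rather than implemented in this patch: the
nd_region stashed in the region's drvdata by connect_to_libnvdimm() could be
deleted on the unregister path. The helper name disconnect_from_libnvdimm()
is hypothetical, and the sketch assumes LIBNVDIMM's nvdimm_region_delete()
(declared in linux/libnvdimm.h) is available to this driver:

/*
 * Sketch only, not part of this patch: undo connect_to_libnvdimm() by
 * deleting the nd_region saved in the cxl_region's drvdata. Assumes
 * nvdimm_region_delete() is exported by LIBNVDIMM; the helper name is
 * made up for illustration.
 */
static void disconnect_from_libnvdimm(struct cxl_region *region)
{
        struct nd_region *ndr = dev_get_drvdata(&region->dev);

        if (!ndr)
                return;

        nvdimm_region_delete(ndr);
        dev_set_drvdata(&region->dev, NULL);
}

region_unregister() (or a devm action registered right after
connect_to_libnvdimm() succeeds) would call this before the rest of the
region teardown, so the LIBNVDIMM representation never outlives the CXL
region.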