@@ -3,6 +3,7 @@
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <cxl/cxl.h>
#include "cxlmem.h"
#include "core.h"
@@ -419,6 +420,7 @@ int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
up_write(&cxl_dpa_rwsem);
return rc;
}
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_free, CXL);
int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
enum cxl_decoder_mode mode)
@@ -457,31 +459,17 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
return 0;
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static resource_size_t cxl_dpa_freespace(struct cxl_endpoint_decoder *cxled,
+ resource_size_t *start_out,
+ resource_size_t *skip_out)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
resource_size_t free_ram_start, free_pmem_start;
- struct cxl_port *port = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct device *dev = &cxled->cxld.dev;
resource_size_t start, avail, skip;
struct resource *p, *last;
- int rc;
-
- down_write(&cxl_dpa_rwsem);
- if (cxled->cxld.region) {
- dev_dbg(dev, "decoder attached to %s\n",
- dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
- }
-
- if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
- dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
- }
+ lockdep_assert_held(&cxl_dpa_rwsem);
for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
last = p;
if (last)
@@ -518,14 +506,45 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
skip_end = start - 1;
skip = skip_end - skip_start + 1;
} else {
- dev_dbg(dev, "mode not set\n");
- rc = -EINVAL;
+ avail = 0;
+ }
+
+ if (!avail)
+ return 0;
+ if (start_out)
+ *start_out = start;
+ if (skip_out)
+ *skip_out = skip;
+ return avail;
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct device *dev = &cxled->cxld.dev;
+ resource_size_t start, avail, skip;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "EBUSY, decoder attached to %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "EBUSY, decoder enabled\n");
+ rc = -EBUSY;
goto out;
}
+ avail = cxl_dpa_freespace(cxled, &start, &skip);
+
if (size > avail) {
dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxl_decoder_mode_name(cxled->mode), &avail);
+ cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
+ &avail);
rc = -ENOSPC;
goto out;
}
@@ -540,6 +559,99 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
+static int find_free_decoder(struct device *dev, void *data)
+{
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_port *port;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ port = cxled_to_port(cxled);
+
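+	/*
+	 * DPA capacity is handed out in decoder id order, so the next
+	 * free decoder is the one whose id is one past the port's
+	 * allocation high-water mark (hdm_end).
+	 */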
+ if (cxled->cxld.id != port->hdm_end + 1)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * cxl_request_dpa - search and reserve DPA given input constraints
+ * @cxlmd: memdev with an endpoint port with available decoders
+ * @is_ram: DPA operation mode (ram vs pmem)
+ * @min: the minimum amount of capacity the call needs
+ * @max: the maximum capacity to allocate, or 0 to allocate all available capacity
+ *
+ * Given that a region needs to allocate from limited HPA capacity, it
+ * may be the case that a device has more mappable DPA capacity than
+ * available HPA. So, the expectation is that @min is a driver known
+ * value for how much capacity is needed, and @max is based on the limit of
+ * how much HPA space is available for a new region.
+ *
+ * Returns a pinned cxl_endpoint_decoder with at least @min bytes of
+ * capacity reserved, or an error pointer. The caller is also expected
+ * to own the lifetime of the memdev registration associated with the
+ * endpoint in order to keep the returned decoder registered.
+ */
+struct cxl_endpoint_decoder *cxl_request_dpa(struct cxl_memdev *cxlmd,
+ bool is_ram,
+ resource_size_t min,
+ resource_size_t max)
+{
+ struct cxl_port *endpoint = cxlmd->endpoint;
+ struct cxl_endpoint_decoder *cxled;
+ enum cxl_decoder_mode mode;
+ struct device *cxled_dev;
+ resource_size_t alloc;
+ int rc;
+
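+	/* HDM decoder base and size registers are in units of 256MB */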
+ if (!IS_ALIGNED(min | max, SZ_256M))
+ return ERR_PTR(-EINVAL);
+
+ down_read(&cxl_dpa_rwsem);
+ cxled_dev = device_find_child(&endpoint->dev, NULL, find_free_decoder);
+ up_read(&cxl_dpa_rwsem);
+
+	if (!cxled_dev)
+		return ERR_PTR(-ENXIO);
+
+	cxled = to_cxl_endpoint_decoder(cxled_dev);
+
+ if (is_ram)
+ mode = CXL_DECODER_RAM;
+ else
+ mode = CXL_DECODER_PMEM;
+
+ rc = cxl_dpa_set_mode(cxled, mode);
+ if (rc)
+ goto err;
+
+ down_read(&cxl_dpa_rwsem);
+ alloc = cxl_dpa_freespace(cxled, NULL, NULL);
+ up_read(&cxl_dpa_rwsem);
+
+ if (max)
+ alloc = min(max, alloc);
+ if (alloc < min) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = cxl_dpa_alloc(cxled, alloc);
+ if (rc)
+ goto err;
+
+ return cxled;
+err:
+ put_device(cxled_dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_request_dpa, CXL);
+
static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
u16 eig;
@@ -55,4 +55,9 @@ struct cxl_port;
struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
unsigned long flags,
resource_size_t *max);
+struct cxl_endpoint_decoder *cxl_request_dpa(struct cxl_memdev *cxlmd,
+ bool is_ram,
+ resource_size_t min,
+ resource_size_t max);
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
#endif
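
For context, a minimal sketch of how an accelerator driver might consume the
exported cxl_request_dpa()/cxl_dpa_free() pair from its probe and teardown
paths. The function names, the 256MB/1GB request sizes, and the assumption
that the declarations are reachable through the new <cxl/cxl.h> include are
illustrative only and not part of this patch.

/* Illustrative consumer of the new DPA request API; not part of this patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <cxl/cxl.h>

static int example_accel_reserve_dpa(struct cxl_memdev *cxlmd,
				     struct cxl_endpoint_decoder **out)
{
	struct cxl_endpoint_decoder *cxled;

	/*
	 * Ask for at least 256MB of ram-mode DPA, but take up to 1GB if
	 * that much is free. Both values must be 256MB aligned.
	 */
	cxled = cxl_request_dpa(cxlmd, true, SZ_256M, SZ_1G);
	if (IS_ERR(cxled))
		return PTR_ERR(cxled);

	*out = cxled;
	return 0;
}

static void example_accel_release_dpa(struct cxl_endpoint_decoder *cxled)
{
	/* Undo the reservation and drop the reference taken by the lookup */
	cxl_dpa_free(cxled);
	put_device(&cxled->cxld.dev);
}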