diff mbox series

[v9,15/27] cxl: define a driver interface for HPA free space enumeration

Message ID 20241230214445.27602-16-alejandro.lucero-palau@amd.com
State New
Headers show
Series cxl: add type2 device basic support | expand

Commit Message

Lucero Palau, Alejandro Dec. 30, 2024, 9:44 p.m. UTC
From: Alejandro Lucero <alucerop@amd.com>

CXL region creation involves allocating capacity from device DPA
(device-physical-address space) and assigning it to decode a given HPA
(host-physical-address space). Before determining how much DPA to
allocate, the amount of available HPA must be determined. Also, not all
HPA is created equal: some specifically targets RAM, some targets PMEM,
some is prepared for device-memory flows like HDM-D and HDM-DB, and some
is host-only (HDM-H).

Wrap all of those concerns into an API that retrieves a root decoder
(platform CXL window) that fits the specified constraints and the
capacity available for a new region.
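
For illustration, a type-2 (accelerator) driver would consume this interface
roughly as follows. This is a sketch only: the surrounding driver context and
the exact flag combination are hypothetical, and since the reported capacity
is a point-in-time snapshot the caller may need to retry if it shrinks before
the region is created.

  resource_size_t avail;
  struct cxl_root_decoder *cxlrd;

  /* ask for a RAM-capable window that also allows type-2 (device memory) flows */
  cxlrd = cxl_get_hpa_freespace(cxlmd,
                                CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2,
                                &avail);
  if (IS_ERR(cxlrd))
          return PTR_ERR(cxlrd);

  /* ... create a region no larger than 'avail' ... */

  put_device(CXLRD_DEV(cxlrd));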

Based on https://lore.kernel.org/linux-cxl/168592159290.1948938.13522227102445462976.stgit@dwillia2-xfh.jf.intel.com/

Signed-off-by: Alejandro Lucero <alucerop@amd.com>
Co-developed-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/cxl/core/region.c | 155 ++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxl.h         |   3 +
 include/cxl/cxl.h         |   8 ++
 3 files changed, 166 insertions(+)

Comments

Jonathan Cameron Jan. 2, 2025, 3:10 p.m. UTC | #1
On Mon, 30 Dec 2024 21:44:33 +0000
<alejandro.lucero-palau@amd.com> wrote:

> From: Alejandro Lucero <alucerop@amd.com>
> 
> CXL region creation involves allocating capacity from device DPA
> (device-physical-address space) and assigning it to decode a given HPA
> (host-physical-address space). Before determining how much DPA to
> allocate the amount of available HPA must be determined. Also, not all
> HPA is created equal, some specifically targets RAM, some target PMEM,
> some is prepared for device-memory flows like HDM-D and HDM-DB, and some
> is host-only (HDM-H).
> 
> Wrap all of those concerns into an API that retrieves a root decoder
> (platform CXL window) that fits the specified constraints and the
> capacity available for a new region.
> 
> Based on https://lore.kernel.org/linux-cxl/168592159290.1948938.13522227102445462976.stgit@dwillia2-xfh.jf.intel.com/
> 
> Signed-off-by: Alejandro Lucero <alucerop@amd.com>
> Co-developed-by: Dan Williams <dan.j.williams@intel.com>
A couple of trivial things inline.

With those tidied up.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

> ---
>  drivers/cxl/core/region.c | 155 ++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxl.h         |   3 +
>  include/cxl/cxl.h         |   8 ++
>  3 files changed, 166 insertions(+)
> 
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index 967132b49832..239fe49bf6a6 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -687,6 +687,161 @@ static int free_hpa(struct cxl_region *cxlr)
>  	return 0;
>  }
>  
> +struct cxlrd_max_context {
> +	struct device *host_bridge;
> +	unsigned long flags;
> +	resource_size_t max_hpa;
> +	struct cxl_root_decoder *cxlrd;
> +};
> +
> +static int find_max_hpa(struct device *dev, void *data)
> +{
> +	struct cxlrd_max_context *ctx = data;
> +	struct cxl_switch_decoder *cxlsd;
> +	struct cxl_root_decoder *cxlrd;
> +	struct resource *res, *prev;
> +	struct cxl_decoder *cxld;
> +	resource_size_t max;
> +
> +	if (!is_root_decoder(dev))
> +		return 0;
> +
> +	cxlrd = to_cxl_root_decoder(dev);
> +	cxlsd = &cxlrd->cxlsd;
> +	cxld = &cxlsd->cxld;
> +	if ((cxld->flags & ctx->flags) != ctx->flags) {
> +		dev_dbg(dev, "%s, flags not matching: %08lx vs %08lx\n",
> +			__func__, cxld->flags, ctx->flags);
Another __func__ to drop.
Check for other cases where dynamic debug applies.
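
For reference, a sketch of the cleaned-up call: dynamic debug can prepend the
function name at run time (the 'f' decorator flag), so __func__ in the format
string is redundant.

  /* e.g. echo 'func find_max_hpa +pf' > /sys/kernel/debug/dynamic_debug/control */
  dev_dbg(dev, "flags not matching: %08lx vs %08lx\n",
          cxld->flags, ctx->flags);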



> +
> +/**
> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints
> + * @cxlmd: the CXL memory device with an endpoint that is mapped by the returned
> + *	    decoder
> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B]
> + * @max_avail_contig: output parameter of max contiguous bytes available in the
> + *		      returned decoder
> + *
> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available given
> + * in (@max_avail_contig))' is a point in time snapshot. If by the time the
> + * caller goes to use this root decoder's capacity the capacity is reduced then
> + * caller needs to loop and retry.
> + *
> + * The returned root decoder has an elevated reference count that needs to be
> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with

put_device(CXLRD_DEV(cxlrd))

> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder
> + * does not race.
> + */
> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
> +					       unsigned long flags,
> +					       resource_size_t *max_avail_contig)
> +{
> +	struct cxl_port *endpoint = cxlmd->endpoint;
> +	struct cxlrd_max_context ctx = {
> +		.host_bridge = endpoint->host_bridge,
> +		.flags = flags,
> +	};
> +	struct cxl_port *root_port;
> +	struct cxl_root *root __free(put_cxl_root) = find_cxl_root(endpoint);
> +
> +	if (!is_cxl_endpoint(endpoint)) {
> +		dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
> +		return ERR_PTR(-EINVAL);
> +	}
> +
> +	if (!root) {
> +		dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
> +		return ERR_PTR(-ENXIO);
> +	}
> +
> +	root_port = &root->port;
> +	down_read(&cxl_region_rwsem);
> +	device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
> +	up_read(&cxl_region_rwsem);
> +
> +	if (!ctx.cxlrd)
> +		return ERR_PTR(-ENOMEM);
> +
> +	*max_avail_contig = ctx.max_hpa;
> +	return ctx.cxlrd;
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, "CXL");
> +
Alejandro Lucero Palau Jan. 3, 2025, 7:55 a.m. UTC | #2
On 1/2/25 15:10, Jonathan Cameron wrote:
> On Mon, 30 Dec 2024 21:44:33 +0000
> <alejandro.lucero-palau@amd.com> wrote:
>
>> From: Alejandro Lucero <alucerop@amd.com>
>>
>> CXL region creation involves allocating capacity from device DPA
>> (device-physical-address space) and assigning it to decode a given HPA
>> (host-physical-address space). Before determining how much DPA to
>> allocate the amount of available HPA must be determined. Also, not all
>> HPA is created equal, some specifically targets RAM, some target PMEM,
>> some is prepared for device-memory flows like HDM-D and HDM-DB, and some
>> is host-only (HDM-H).
>>
>> Wrap all of those concerns into an API that retrieves a root decoder
>> (platform CXL window) that fits the specified constraints and the
>> capacity available for a new region.
>>
>> Based on https://lore.kernel.org/linux-cxl/168592159290.1948938.13522227102445462976.stgit@dwillia2-xfh.jf.intel.com/
>>
>> Signed-off-by: Alejandro Lucero <alucerop@amd.com>
>> Co-developed-by: Dan Williams <dan.j.williams@intel.com>
> A couple of trivial things inline.
>
> With those tidied up.
>
> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
>
>> ---
>>   drivers/cxl/core/region.c | 155 ++++++++++++++++++++++++++++++++++++++
>>   drivers/cxl/cxl.h         |   3 +
>>   include/cxl/cxl.h         |   8 ++
>>   3 files changed, 166 insertions(+)
>>
>> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
>> index 967132b49832..239fe49bf6a6 100644
>> --- a/drivers/cxl/core/region.c
>> +++ b/drivers/cxl/core/region.c
>> @@ -687,6 +687,161 @@ static int free_hpa(struct cxl_region *cxlr)
>>   	return 0;
>>   }
>>   
>> +struct cxlrd_max_context {
>> +	struct device *host_bridge;
>> +	unsigned long flags;
>> +	resource_size_t max_hpa;
>> +	struct cxl_root_decoder *cxlrd;
>> +};
>> +
>> +static int find_max_hpa(struct device *dev, void *data)
>> +{
>> +	struct cxlrd_max_context *ctx = data;
>> +	struct cxl_switch_decoder *cxlsd;
>> +	struct cxl_root_decoder *cxlrd;
>> +	struct resource *res, *prev;
>> +	struct cxl_decoder *cxld;
>> +	resource_size_t max;
>> +
>> +	if (!is_root_decoder(dev))
>> +		return 0;
>> +
>> +	cxlrd = to_cxl_root_decoder(dev);
>> +	cxlsd = &cxlrd->cxlsd;
>> +	cxld = &cxlsd->cxld;
>> +	if ((cxld->flags & ctx->flags) != ctx->flags) {
>> +		dev_dbg(dev, "%s, flags not matching: %08lx vs %08lx\n",
>> +			__func__, cxld->flags, ctx->flags);
> Another __func__ to drop.
> Check for other cases where dynamic debug applies.
>

Will do.


>
>> +
>> +/**
>> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints
>> + * @cxlmd: the CXL memory device with an endpoint that is mapped by the returned
>> + *	    decoder
>> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B]
>> + * @max_avail_contig: output parameter of max contiguous bytes available in the
>> + *		      returned decoder
>> + *
>> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available given
>> + * in (@max_avail_contig))' is a point in time snapshot. If by the time the
>> + * caller goes to use this root decoder's capacity the capacity is reduced then
>> + * caller needs to loop and retry.
>> + *
>> + * The returned root decoder has an elevated reference count that needs to be
>> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with
> put_device(CXLRD_DEV(cxlrd))


Right.

This helps to see that the reference to cxl_acquire/release_endpoint needs to
be dropped.

Thanks!


>> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder
>> + * does not race.
>> + */
>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
>> +					       unsigned long flags,
>> +					       resource_size_t *max_avail_contig)
>> +{
>> +	struct cxl_port *endpoint = cxlmd->endpoint;
>> +	struct cxlrd_max_context ctx = {
>> +		.host_bridge = endpoint->host_bridge,
>> +		.flags = flags,
>> +	};
>> +	struct cxl_port *root_port;
>> +	struct cxl_root *root __free(put_cxl_root) = find_cxl_root(endpoint);
>> +
>> +	if (!is_cxl_endpoint(endpoint)) {
>> +		dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
>> +		return ERR_PTR(-EINVAL);
>> +	}
>> +
>> +	if (!root) {
>> +		dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
>> +		return ERR_PTR(-ENXIO);
>> +	}
>> +
>> +	root_port = &root->port;
>> +	down_read(&cxl_region_rwsem);
>> +	device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
>> +	up_read(&cxl_region_rwsem);
>> +
>> +	if (!ctx.cxlrd)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	*max_avail_contig = ctx.max_hpa;
>> +	return ctx.cxlrd;
>> +}
>> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, "CXL");
>> +
Dan Williams Jan. 18, 2025, 3:02 a.m. UTC | #3
alejandro.lucero-palau@ wrote:
> From: Alejandro Lucero <alucerop@amd.com>
> 
> CXL region creation involves allocating capacity from device DPA
> (device-physical-address space) and assigning it to decode a given HPA
> (host-physical-address space). Before determining how much DPA to
> allocate the amount of available HPA must be determined. Also, not all
> HPA is created equal, some specifically targets RAM, some target PMEM,
> some is prepared for device-memory flows like HDM-D and HDM-DB, and some
> is host-only (HDM-H).
> 
> Wrap all of those concerns into an API that retrieves a root decoder
> (platform CXL window) that fits the specified constraints and the
> capacity available for a new region.
> 
> Based on https://lore.kernel.org/linux-cxl/168592159290.1948938.13522227102445462976.stgit@dwillia2-xfh.jf.intel.com/

What needed changing such that you could not use the patch verbatim?
Then I can focus on that, although I am also critical of code I wrote
(like the DPA layout mess).

> Signed-off-by: Alejandro Lucero <alucerop@amd.com>
> Co-developed-by: Dan Williams <dan.j.williams@intel.com>

Include Signed-off-by: whenever including Co-developed-by

> ---
>  drivers/cxl/core/region.c | 155 ++++++++++++++++++++++++++++++++++++++
>  drivers/cxl/cxl.h         |   3 +
>  include/cxl/cxl.h         |   8 ++
>  3 files changed, 166 insertions(+)
> 
> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
> index 967132b49832..239fe49bf6a6 100644
> --- a/drivers/cxl/core/region.c
> +++ b/drivers/cxl/core/region.c
> @@ -687,6 +687,161 @@ static int free_hpa(struct cxl_region *cxlr)
>  	return 0;
>  }
>  
> +struct cxlrd_max_context {
> +	struct device *host_bridge;
> +	unsigned long flags;
> +	resource_size_t max_hpa;
> +	struct cxl_root_decoder *cxlrd;
> +};
> +
> +static int find_max_hpa(struct device *dev, void *data)
> +{
> +	struct cxlrd_max_context *ctx = data;
> +	struct cxl_switch_decoder *cxlsd;
> +	struct cxl_root_decoder *cxlrd;
> +	struct resource *res, *prev;
> +	struct cxl_decoder *cxld;
> +	resource_size_t max;
> +
> +	if (!is_root_decoder(dev))
> +		return 0;
> +
> +	cxlrd = to_cxl_root_decoder(dev);
> +	cxlsd = &cxlrd->cxlsd;
> +	cxld = &cxlsd->cxld;
> +	if ((cxld->flags & ctx->flags) != ctx->flags) {
> +		dev_dbg(dev, "%s, flags not matching: %08lx vs %08lx\n",
> +			__func__, cxld->flags, ctx->flags);
> +		return 0;
> +	}
> +
> +	/*
> +	 * The CXL specs do not forbid an accelerator being part of an
> +	 * interleaved HPA range, but it is unlikely and because it simplifies
> +	 * the code, don't allow it.
> +	 */
> +	if (cxld->interleave_ways != 1) {
> +		dev_dbg(dev, "interleave_ways not matching\n");
> +		return 0;
> +	}

Why does the core need to carry this quirk? If an accelerator does not
want to support interleaving then just don't ask for interleaved
capacity?

> +
> +	guard(rwsem_read)(&cxl_region_rwsem);

See below...

> +	if (ctx->host_bridge != cxlsd->target[0]->dport_dev) {
> +		dev_dbg(dev, "host bridge does not match\n");
> +		return 0;
> +	}
> +
> +	/*
> +	 * Walk the root decoder resource range relying on cxl_region_rwsem to
> +	 * preclude sibling arrival/departure and find the largest free space
> +	 * gap.
> +	 */
> +	lockdep_assert_held_read(&cxl_region_rwsem);

The lock was just acquired a few lines up, no need for extra lockdep
assertion paranoia. However, I think the lock belongs outside of this
function otherwise the iterator of region is racing region creation.
However2, cxl_get_hpa_freespace() is already holding the lock!

So, I am not sure this code path has ever been tested as lockdep should
complain about the double acquisition.

> +	max = 0;
> +	res = cxlrd->res->child;
> +
> +	/* With no resource child the whole parent resource is available */
> +	if (!res)
> +		max = resource_size(cxlrd->res);
> +	else
> +		max = 0;
> +
> +	for (prev = NULL; res; prev = res, res = res->sibling) {
> +		struct resource *next = res->sibling;
> +		resource_size_t free = 0;
> +
> +		/*
> +		 * Sanity check for preventing arithmetic problems below as a
> +		 * resource with size 0 could imply using the end field below
> +		 * when set to unsigned zero - 1 or all f in hex.
> +		 */
> +		if (prev && !resource_size(prev))
> +			continue;
> +
> +		if (!prev && res->start > cxlrd->res->start) {
> +			free = res->start - cxlrd->res->start;
> +			max = max(free, max);
> +		}
> +		if (prev && res->start > prev->end + 1) {
> +			free = res->start - prev->end + 1;
> +			max = max(free, max);
> +		}
> +		if (next && res->end + 1 < next->start) {
> +			free = next->start - res->end + 1;
> +			max = max(free, max);
> +		}
> +		if (!next && res->end + 1 < cxlrd->res->end + 1) {
> +			free = cxlrd->res->end + 1 - res->end + 1;
> +			max = max(free, max);
> +		}
> +	}
> +
> +	dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max);
> +	if (max > ctx->max_hpa) {
> +		if (ctx->cxlrd)
> +			put_device(CXLRD_DEV(ctx->cxlrd));

What drove capitalizing "cxlrd_dev"?

> +		get_device(CXLRD_DEV(cxlrd));
> +		ctx->cxlrd = cxlrd;
> +		ctx->max_hpa = max;
> +		dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n",
> +			&max);
> +	}
> +	return 0;
> +}
> +
> +/**
> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints
> + * @cxlmd: the CXL memory device with an endpoint that is mapped by the returned
> + *	    decoder
> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B]
> + * @max_avail_contig: output parameter of max contiguous bytes available in the
> + *		      returned decoder
> + *
> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available given
> + * in (@max_avail_contig))' is a point in time snapshot. If by the time the
> + * caller goes to use this root decoder's capacity the capacity is reduced then
> + * caller needs to loop and retry.
> + *
> + * The returned root decoder has an elevated reference count that needs to be
> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with
> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder
> + * does not race.
> + */
> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
> +					       unsigned long flags,
> +					       resource_size_t *max_avail_contig)

I don't understand the rationale throwing away the ability to search
root decoders by additional constraints.

> +{
> +	struct cxl_port *endpoint = cxlmd->endpoint;
> +	struct cxlrd_max_context ctx = {
> +		.host_bridge = endpoint->host_bridge,
> +		.flags = flags,
> +	};
> +	struct cxl_port *root_port;
> +	struct cxl_root *root __free(put_cxl_root) = find_cxl_root(endpoint);
> +
> +	if (!is_cxl_endpoint(endpoint)) {
> +		dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
> +		return ERR_PTR(-EINVAL);
> +	}
> +
> +	if (!root) {
> +		dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
> +		return ERR_PTR(-ENXIO);
> +	}
> +
> +	root_port = &root->port;
> +	down_read(&cxl_region_rwsem);
> +	device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
> +	up_read(&cxl_region_rwsem);
> +
> +	if (!ctx.cxlrd)
> +		return ERR_PTR(-ENOMEM);
> +
> +	*max_avail_contig = ctx.max_hpa;
> +	return ctx.cxlrd;
> +}
> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, "CXL");

Lets just do EXPORT_SYMBOL_GPL() for any API that an accelerator would
use. The symbol namespace was more for warning about potential semantic
shortcuts and liberties taken by drivers/cxl/ modules talking to each
other. Anything that is exported for outside of drivers/cxl/ usage
should not take those liberties.
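
In other words, a sketch of the requested change would be simply:

  EXPORT_SYMBOL_GPL(cxl_get_hpa_freespace);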

> +
>  static ssize_t size_store(struct device *dev, struct device_attribute *attr,
>  			  const char *buf, size_t len)
>  {
> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
> index a662b1b88408..efdd4627b774 100644
> --- a/drivers/cxl/cxl.h
> +++ b/drivers/cxl/cxl.h
> @@ -785,6 +785,9 @@ static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport,
>  struct cxl_decoder *to_cxl_decoder(struct device *dev);
>  struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
>  struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
> +
> +#define CXLRD_DEV(cxlrd) (&(cxlrd)->cxlsd.cxld.dev)

...oh, it's a macro now for some reason.

> +
>  struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
>  bool is_root_decoder(struct device *dev);
>  bool is_switch_decoder(struct device *dev);
> diff --git a/include/cxl/cxl.h b/include/cxl/cxl.h
> index f7ce683465f0..4a8434a2b5da 100644
> --- a/include/cxl/cxl.h
> +++ b/include/cxl/cxl.h
> @@ -6,6 +6,10 @@
>  
>  #include <linux/ioport.h>
>  
> +#define CXL_DECODER_F_RAM   BIT(0)
> +#define CXL_DECODER_F_PMEM  BIT(1)
> +#define CXL_DECODER_F_TYPE2 BIT(2)
> +
>  enum cxl_resource {
>  	CXL_RES_DPA,
>  	CXL_RES_RAM,
> @@ -50,4 +54,8 @@ int cxl_release_resource(struct cxl_dev_state *cxlds, enum cxl_resource type);
>  void cxl_set_media_ready(struct cxl_dev_state *cxlds);
>  struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
>  				       struct cxl_dev_state *cxlds);
> +struct cxl_port;
> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
> +					       unsigned long flags,
> +					       resource_size_t *max);

The name does not track for me, because nothing is acquired in this
function. It just surveys for a root decoder that meets the constraints.
It is possible that by the time the caller turns around to use that
freespace something else already grabbed it.
Alejandro Lucero Palau Jan. 20, 2025, 6:16 p.m. UTC | #4
On 1/18/25 03:02, Dan Williams wrote:
> alejandro.lucero-palau@ wrote:
>> From: Alejandro Lucero <alucerop@amd.com>
>>
>> CXL region creation involves allocating capacity from device DPA
>> (device-physical-address space) and assigning it to decode a given HPA
>> (host-physical-address space). Before determining how much DPA to
>> allocate the amount of available HPA must be determined. Also, not all
>> HPA is created equal, some specifically targets RAM, some target PMEM,
>> some is prepared for device-memory flows like HDM-D and HDM-DB, and some
>> is host-only (HDM-H).
>>
>> Wrap all of those concerns into an API that retrieves a root decoder
>> (platform CXL window) that fits the specified constraints and the
>> capacity available for a new region.
>>
>> Based on https://lore.kernel.org/linux-cxl/168592159290.1948938.13522227102445462976.stgit@dwillia2-xfh.jf.intel.com/
> What needed changing such that you could not use the patch verbatim?
> Then I can focus on that, although I am also critical of code I wrote
> (like the DPA layout mess).


One thing modified is related to that ugly double lock you found out below.

I do not remember what was the problem but the original code using 
sequential locks did not work for me.

More about this later.


>> Signed-off-by: Alejandro Lucero <alucerop@amd.com>
>> Co-developed-by: Dan Williams <dan.j.williams@intel.com>
> Include Signed-off-by: whenever including Co-developed-by


I'll do so, but it is weird. I think that, at least in this case, where the
co-development means different times rather than close cooperation, you
should add it explicitly.


>
>> ---
>>   drivers/cxl/core/region.c | 155 ++++++++++++++++++++++++++++++++++++++
>>   drivers/cxl/cxl.h         |   3 +
>>   include/cxl/cxl.h         |   8 ++
>>   3 files changed, 166 insertions(+)
>>
>> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
>> index 967132b49832..239fe49bf6a6 100644
>> --- a/drivers/cxl/core/region.c
>> +++ b/drivers/cxl/core/region.c
>> @@ -687,6 +687,161 @@ static int free_hpa(struct cxl_region *cxlr)
>>   	return 0;
>>   }
>>   
>> +struct cxlrd_max_context {
>> +	struct device *host_bridge;
>> +	unsigned long flags;
>> +	resource_size_t max_hpa;
>> +	struct cxl_root_decoder *cxlrd;
>> +};
>> +
>> +static int find_max_hpa(struct device *dev, void *data)
>> +{
>> +	struct cxlrd_max_context *ctx = data;
>> +	struct cxl_switch_decoder *cxlsd;
>> +	struct cxl_root_decoder *cxlrd;
>> +	struct resource *res, *prev;
>> +	struct cxl_decoder *cxld;
>> +	resource_size_t max;
>> +
>> +	if (!is_root_decoder(dev))
>> +		return 0;
>> +
>> +	cxlrd = to_cxl_root_decoder(dev);
>> +	cxlsd = &cxlrd->cxlsd;
>> +	cxld = &cxlsd->cxld;
>> +	if ((cxld->flags & ctx->flags) != ctx->flags) {
>> +		dev_dbg(dev, "%s, flags not matching: %08lx vs %08lx\n",
>> +			__func__, cxld->flags, ctx->flags);
>> +		return 0;
>> +	}
>> +
>> +	/*
>> +	 * The CXL specs do not forbid an accelerator being part of an
>> +	 * interleaved HPA range, but it is unlikely and because it simplifies
>> +	 * the code, don´t allow it.
>> +	 */
>> +	if (cxld->interleave_ways != 1) {
>> +		dev_dbg(dev, "interleave_ways not matching\n");
>> +		return 0;
>> +	}
> Why does the core need to carry this quirk? If an accelerator does not
> want to support interleaving then just don't ask for interleaved
> capacity?
>

I think it was suggested as a simplification for initial Type2 support.


>> +
>> +	guard(rwsem_read)(&cxl_region_rwsem);
> See below...
>
>> +	if (ctx->host_bridge != cxlsd->target[0]->dport_dev) {
>> +		dev_dbg(dev, "host bridge does not match\n");
>> +		return 0;
>> +	}
>> +
>> +	/*
>> +	 * Walk the root decoder resource range relying on cxl_region_rwsem to
>> +	 * preclude sibling arrival/departure and find the largest free space
>> +	 * gap.
>> +	 */
>> +	lockdep_assert_held_read(&cxl_region_rwsem);
> The lock was just acquired a few lines up, no need for extra lockdep
> assertion paranoia. However, I think the lock belongs outside of this
> function otherwise the iterator of region is racing region creation.
> However2, cxl_get_hpa_freespace() is already holding the lock!


You are right, and this is so obviously wrong ...

I think the problem is the adaptation of that initial patch with the 
seqlocks, and I ended up mixing things here.

I'll try to figure out why I had to adapt it and whether I mistook the lock
to use.


>
> So, I am not sure this code path has ever been tested as lockdep should
> complain about the double acquisition.


Oddly enough, it has been tested with two different drivers and with a
kernel configured with lockdep.

It is worth investigating ...


>
>> +	max = 0;
>> +	res = cxlrd->res->child;
>> +
>> +	/* With no resource child the whole parent resource is available */
>> +	if (!res)
>> +		max = resource_size(cxlrd->res);
>> +	else
>> +		max = 0;
>> +
>> +	for (prev = NULL; res; prev = res, res = res->sibling) {
>> +		struct resource *next = res->sibling;
>> +		resource_size_t free = 0;
>> +
>> +		/*
>> +		 * Sanity check for preventing arithmetic problems below as a
>> +		 * resource with size 0 could imply using the end field below
>> +		 * when set to unsigned zero - 1 or all f in hex.
>> +		 */
>> +		if (prev && !resource_size(prev))
>> +			continue;
>> +
>> +		if (!prev && res->start > cxlrd->res->start) {
>> +			free = res->start - cxlrd->res->start;
>> +			max = max(free, max);
>> +		}
>> +		if (prev && res->start > prev->end + 1) {
>> +			free = res->start - prev->end + 1;
>> +			max = max(free, max);
>> +		}
>> +		if (next && res->end + 1 < next->start) {
>> +			free = next->start - res->end + 1;
>> +			max = max(free, max);
>> +		}
>> +		if (!next && res->end + 1 < cxlrd->res->end + 1) {
>> +			free = cxlrd->res->end + 1 - res->end + 1;
>> +			max = max(free, max);
>> +		}
>> +	}
>> +
>> +	dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max);
>> +	if (max > ctx->max_hpa) {
>> +		if (ctx->cxlrd)
>> +			put_device(CXLRD_DEV(ctx->cxlrd));
> What drove capitalizing "cxlrd_dev"?
>
>> +		get_device(CXLRD_DEV(cxlrd));
>> +		ctx->cxlrd = cxlrd;
>> +		ctx->max_hpa = max;
>> +		dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n",
>> +			&max);
>> +	}
>> +	return 0;
>> +}
>> +
>> +/**
>> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints
>> + * @cxlmd: the CXL memory device with an endpoint that is mapped by the returned
>> + *	    decoder
>> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B]
>> + * @max_avail_contig: output parameter of max contiguous bytes available in the
>> + *		      returned decoder
>> + *
>> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available given
>> + * in (@max_avail_contig))' is a point in time snapshot. If by the time the
>> + * caller goes to use this root decoder's capacity the capacity is reduced then
>> + * caller needs to loop and retry.
>> + *
>> + * The returned root decoder has an elevated reference count that needs to be
>> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with
>> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder
>> + * does not race.
>> + */
>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
>> +					       unsigned long flags,
>> +					       resource_size_t *max_avail_contig)
> I don't understand the rationale throwing away the ability to search
> root decoders by additional constraints.


Not sure I follow you here. I think the constraints set by the caller
are something to check, for sure.


>> +{
>> +	struct cxl_port *endpoint = cxlmd->endpoint;
>> +	struct cxlrd_max_context ctx = {
>> +		.host_bridge = endpoint->host_bridge,
>> +		.flags = flags,
>> +	};
>> +	struct cxl_port *root_port;
>> +	struct cxl_root *root __free(put_cxl_root) = find_cxl_root(endpoint);
>> +
>> +	if (!is_cxl_endpoint(endpoint)) {
>> +		dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
>> +		return ERR_PTR(-EINVAL);
>> +	}
>> +
>> +	if (!root) {
>> +		dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
>> +		return ERR_PTR(-ENXIO);
>> +	}
>> +
>> +	root_port = &root->port;
>> +	down_read(&cxl_region_rwsem);
>> +	device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
>> +	up_read(&cxl_region_rwsem);
>> +
>> +	if (!ctx.cxlrd)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	*max_avail_contig = ctx.max_hpa;
>> +	return ctx.cxlrd;
>> +}
>> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, "CXL");
> Lets just do EXPORT_SYMBOL_GPL() for any API that an accelerator would
> use. The symbol namespace was more for warning about potential semantic
> shortcuts and liberties taken by drivers/cxl/ modules talking to each
> other. Anything that is exported for outside of drivers/cxl/ usage
> should not take those liberties.


OK


>
>> +
>>   static ssize_t size_store(struct device *dev, struct device_attribute *attr,
>>   			  const char *buf, size_t len)
>>   {
>> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
>> index a662b1b88408..efdd4627b774 100644
>> --- a/drivers/cxl/cxl.h
>> +++ b/drivers/cxl/cxl.h
>> @@ -785,6 +785,9 @@ static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport,
>>   struct cxl_decoder *to_cxl_decoder(struct device *dev);
>>   struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
>>   struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
>> +
>> +#define CXLRD_DEV(cxlrd) (&(cxlrd)->cxlsd.cxld.dev)
> ...oh, it's a macro now for some reason.
>
>> +
>>   struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
>>   bool is_root_decoder(struct device *dev);
>>   bool is_switch_decoder(struct device *dev);
>> diff --git a/include/cxl/cxl.h b/include/cxl/cxl.h
>> index f7ce683465f0..4a8434a2b5da 100644
>> --- a/include/cxl/cxl.h
>> +++ b/include/cxl/cxl.h
>> @@ -6,6 +6,10 @@
>>   
>>   #include <linux/ioport.h>
>>   
>> +#define CXL_DECODER_F_RAM   BIT(0)
>> +#define CXL_DECODER_F_PMEM  BIT(1)
>> +#define CXL_DECODER_F_TYPE2 BIT(2)
>> +
>>   enum cxl_resource {
>>   	CXL_RES_DPA,
>>   	CXL_RES_RAM,
>> @@ -50,4 +54,8 @@ int cxl_release_resource(struct cxl_dev_state *cxlds, enum cxl_resource type);
>>   void cxl_set_media_ready(struct cxl_dev_state *cxlds);
>>   struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
>>   				       struct cxl_dev_state *cxlds);
>> +struct cxl_port;
>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
>> +					       unsigned long flags,
>> +					       resource_size_t *max);
> The name does not track for me, because nothing is acquired in this
> function. It just surveys for a root decoder that meets the constraints.
> It is possible that by the time the caller turns around to use that
> freespace something else already grabbed it.


I'll think of a better name.

Thanks!
Alejandro Lucero Palau Jan. 21, 2025, 2 p.m. UTC | #5
On 1/20/25 18:16, Alejandro Lucero Palau wrote:
>
> On 1/18/25 03:02, Dan Williams wrote:
>> alejandro.lucero-palau@ wrote:
>>> From: Alejandro Lucero <alucerop@amd.com>
>>>
>>> CXL region creation involves allocating capacity from device DPA
>>> (device-physical-address space) and assigning it to decode a given HPA
>>> (host-physical-address space). Before determining how much DPA to
>>> allocate the amount of available HPA must be determined. Also, not all
>>> HPA is created equal, some specifically targets RAM, some target PMEM,
>>> some is prepared for device-memory flows like HDM-D and HDM-DB, and 
>>> some
>>> is host-only (HDM-H).
>>>
>>> Wrap all of those concerns into an API that retrieves a root decoder
>>> (platform CXL window) that fits the specified constraints and the
>>> capacity available for a new region.
>>>
>>> Based on 
>>> https://lore.kernel.org/linux-cxl/168592159290.1948938.13522227102445462976.stgit@dwillia2-xfh.jf.intel.com/
>> What needed changing such that you could not use the patch verbatim?
>> Then I can focus on that, although I am also critical of code I wrote
>> (like the DPA layout mess).
>
>
> One thing modified is related to that ugly double lock you found out 
> below.
>
> I do not remember what was the problem but the original code using 
> sequential locks did not work for me.
>
> More about this later.
>
>
>>> Signed-off-by: Alejandro Lucero <alucerop@amd.com>
>>> Co-developed-by: Dan Williams <dan.j.williams@intel.com>
>> Include Signed-off-by: whenever including Co-developed-by
>
>
> I'll do but it is weird. I think, at least in this case where the 
> co-development means different times and not close cooperation, you 
> should add it explicitly.
>
>
>>
>>> ---
>>>   drivers/cxl/core/region.c | 155 
>>> ++++++++++++++++++++++++++++++++++++++
>>>   drivers/cxl/cxl.h         |   3 +
>>>   include/cxl/cxl.h         |   8 ++
>>>   3 files changed, 166 insertions(+)
>>>
>>> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
>>> index 967132b49832..239fe49bf6a6 100644
>>> --- a/drivers/cxl/core/region.c
>>> +++ b/drivers/cxl/core/region.c
>>> @@ -687,6 +687,161 @@ static int free_hpa(struct cxl_region *cxlr)
>>>       return 0;
>>>   }
>>>   +struct cxlrd_max_context {
>>> +    struct device *host_bridge;
>>> +    unsigned long flags;
>>> +    resource_size_t max_hpa;
>>> +    struct cxl_root_decoder *cxlrd;
>>> +};
>>> +
>>> +static int find_max_hpa(struct device *dev, void *data)
>>> +{
>>> +    struct cxlrd_max_context *ctx = data;
>>> +    struct cxl_switch_decoder *cxlsd;
>>> +    struct cxl_root_decoder *cxlrd;
>>> +    struct resource *res, *prev;
>>> +    struct cxl_decoder *cxld;
>>> +    resource_size_t max;
>>> +
>>> +    if (!is_root_decoder(dev))
>>> +        return 0;
>>> +
>>> +    cxlrd = to_cxl_root_decoder(dev);
>>> +    cxlsd = &cxlrd->cxlsd;
>>> +    cxld = &cxlsd->cxld;
>>> +    if ((cxld->flags & ctx->flags) != ctx->flags) {
>>> +        dev_dbg(dev, "%s, flags not matching: %08lx vs %08lx\n",
>>> +            __func__, cxld->flags, ctx->flags);
>>> +        return 0;
>>> +    }
>>> +
>>> +    /*
>>> +     * The CXL specs do not forbid an accelerator being part of an
>>> +     * interleaved HPA range, but it is unlikely and because it 
>>> simplifies
>>> +     * the code, don't allow it.
>>> +     */
>>> +    if (cxld->interleave_ways != 1) {
>>> +        dev_dbg(dev, "interleave_ways not matching\n");
>>> +        return 0;
>>> +    }
>> Why does the core need to carry this quirk? If an accelerator does not
>> want to support interleaving then just don't ask for interleaved
>> capacity?
>>
>
> I think it was suggested as a simplification for initial Type2 support.
>
>
>>> +
>>> +    guard(rwsem_read)(&cxl_region_rwsem);
>> See below...
>>
>>> +    if (ctx->host_bridge != cxlsd->target[0]->dport_dev) {
>>> +        dev_dbg(dev, "host bridge does not match\n");
>>> +        return 0;
>>> +    }
>>> +
>>> +    /*
>>> +     * Walk the root decoder resource range relying on 
>>> cxl_region_rwsem to
>>> +     * preclude sibling arrival/departure and find the largest free 
>>> space
>>> +     * gap.
>>> +     */
>>> +    lockdep_assert_held_read(&cxl_region_rwsem);
>> The lock was just acquired a few lines up, no need for extra lockdep
>> assertion paranoia. However, I think the lock belongs outside of this
>> function otherwise the iterator of region is racing region creation.
>> However2, cxl_get_hpa_freespace() is already holding the lock!
>
>
> You are right and  this is so obviously wrong ...
>
> I think the problem is the adaptation of that initial patch with the 
> seqlocks, and I ended up mixing things here.
>
> I'll try to figure out why I had to adapt it and if I mistook the lock 
> to use.
>
>

After looking at the original code, the problem was that the locking you used
had to change, since the target_lock field inside cxl_switch_decoder was
removed after your patch and before I took it over.


I did look at where a cxl_switch_decoder could be modified, and I
realized (guessing, because I have not studied this in detail yet) that it
could be enough to grab the cxl_region_rwsem lock instead.

So I ended up adding that line for the locking without realizing it was 
already taken by the caller function.

The code morphed because it was suggested that current Type2 support should
not handle interleaving, for simplicity and because the use case did not
require it. The lock remained because the access to the cxl_switch_decoder
linked to the endpoint is still there, although simpler, and it serves as a
sanity check.


If we keep the simpler approach for now and forget about interleaving,
that code can just be dropped. If interleaving is a must, which I think
it is not at this point, I should work on this ASAP.
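
A minimal sketch of that direction, assuming the only caller keeps holding
cxl_region_rwsem for read around device_for_each_child() as
cxl_get_hpa_freespace() already does, so the inner locking goes away:

  static int find_max_hpa(struct device *dev, void *data)
  {
          struct cxlrd_max_context *ctx = data;
          /* ... root decoder lookup and flags check as before ... */

          /*
           * No guard(rwsem_read)() and no lockdep_assert_held_read() here:
           * the only caller, cxl_get_hpa_freespace(), already holds
           * cxl_region_rwsem for read around device_for_each_child(), which
           * keeps the decoder targets and the resource tree stable.
           */
          if (ctx->host_bridge != cxlsd->target[0]->dport_dev) {
                  dev_dbg(dev, "host bridge does not match\n");
                  return 0;
          }

          /* ... free space walk unchanged ... */
          return 0;
  }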


>>
>> So, I am not sure this code path has ever been tested as lockdep should
>> complain about the double acquisition.
>
>
> Oddly enough, it has been tested with two different drivers and with 
> the kernel configuring lockdep.
>
> It is worth to investigate ...
>

Confirmed the double lock is not an issue. Maybe the code hidden in
those macros checks whether the current caller is the same as the
current owner of the lock. I will check that or investigate further.

Thank you


>
>>
>>> +    max = 0;
>>> +    res = cxlrd->res->child;
>>> +
>>> +    /* With no resource child the whole parent resource is 
>>> available */
>>> +    if (!res)
>>> +        max = resource_size(cxlrd->res);
>>> +    else
>>> +        max = 0;
>>> +
>>> +    for (prev = NULL; res; prev = res, res = res->sibling) {
>>> +        struct resource *next = res->sibling;
>>> +        resource_size_t free = 0;
>>> +
>>> +        /*
>>> +         * Sanity check for preventing arithmetic problems below as a
>>> +         * resource with size 0 could imply using the end field below
>>> +         * when set to unsigned zero - 1 or all f in hex.
>>> +         */
>>> +        if (prev && !resource_size(prev))
>>> +            continue;
>>> +
>>> +        if (!prev && res->start > cxlrd->res->start) {
>>> +            free = res->start - cxlrd->res->start;
>>> +            max = max(free, max);
>>> +        }
>>> +        if (prev && res->start > prev->end + 1) {
>>> +            free = res->start - prev->end + 1;
>>> +            max = max(free, max);
>>> +        }
>>> +        if (next && res->end + 1 < next->start) {
>>> +            free = next->start - res->end + 1;
>>> +            max = max(free, max);
>>> +        }
>>> +        if (!next && res->end + 1 < cxlrd->res->end + 1) {
>>> +            free = cxlrd->res->end + 1 - res->end + 1;
>>> +            max = max(free, max);
>>> +        }
>>> +    }
>>> +
>>> +    dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", 
>>> &max);
>>> +    if (max > ctx->max_hpa) {
>>> +        if (ctx->cxlrd)
>>> +            put_device(CXLRD_DEV(ctx->cxlrd));
>> What drove capitalizing "cxlrd_dev"?
>>
>>> +        get_device(CXLRD_DEV(cxlrd));
>>> +        ctx->cxlrd = cxlrd;
>>> +        ctx->max_hpa = max;
>>> +        dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n",
>>> +            &max);
>>> +    }
>>> +    return 0;
>>> +}
>>> +
>>> +/**
>>> + * cxl_get_hpa_freespace - find a root decoder with free capacity 
>>> per constraints
>>> + * @cxlmd: the CXL memory device with an endpoint that is mapped by 
>>> the returned
>>> + *        decoder
>>> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H 
>>> vs HDM-D[B]
>>> + * @max_avail_contig: output parameter of max contiguous bytes 
>>> available in the
>>> + *              returned decoder
>>> + *
>>> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes 
>>> available given
>>> + * in (@max_avail_contig))' is a point in time snapshot. If by the 
>>> time the
>>> + * caller goes to use this root decoder's capacity the capacity is 
>>> reduced then
>>> + * caller needs to loop and retry.
>>> + *
>>> + * The returned root decoder has an elevated reference count that 
>>> needs to be
>>> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with
>>> + * cxl_{acquire,release}_endpoint(), that ensures removal of the 
>>> root decoder
>>> + * does not race.
>>> + */
>>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev 
>>> *cxlmd,
>>> +                           unsigned long flags,
>>> +                           resource_size_t *max_avail_contig)
>> I don't understand the rationale throwing away the ability to search
>> root decoders by additional constraints.
>
>
> Not sure I follow you here. I think the constraints, set by the 
> caller, is something to check for sure.
>
>
>>> +{
>>> +    struct cxl_port *endpoint = cxlmd->endpoint;
>>> +    struct cxlrd_max_context ctx = {
>>> +        .host_bridge = endpoint->host_bridge,
>>> +        .flags = flags,
>>> +    };
>>> +    struct cxl_port *root_port;
>>> +    struct cxl_root *root __free(put_cxl_root) = 
>>> find_cxl_root(endpoint);
>>> +
>>> +    if (!is_cxl_endpoint(endpoint)) {
>>> +        dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
>>> +        return ERR_PTR(-EINVAL);
>>> +    }
>>> +
>>> +    if (!root) {
>>> +        dev_dbg(&endpoint->dev, "endpoint can not be related to a 
>>> root port\n");
>>> +        return ERR_PTR(-ENXIO);
>>> +    }
>>> +
>>> +    root_port = &root->port;
>>> +    down_read(&cxl_region_rwsem);
>>> +    device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
>>> +    up_read(&cxl_region_rwsem);
>>> +
>>> +    if (!ctx.cxlrd)
>>> +        return ERR_PTR(-ENOMEM);
>>> +
>>> +    *max_avail_contig = ctx.max_hpa;
>>> +    return ctx.cxlrd;
>>> +}
>>> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, "CXL");
>> Lets just do EXPORT_SYMBOL_GPL() for any API that an accelerator would
>> use. The symbol namespace was more for warning about potential semantic
>> shortcuts and liberties taken by drivers/cxl/ modules talking to each
>> other. Anything that is exported for outside of drivers/cxl/ usage
>> should not take those liberties.
>
>
> OK
>
>
>>
>>> +
>>>   static ssize_t size_store(struct device *dev, struct 
>>> device_attribute *attr,
>>>                 const char *buf, size_t len)
>>>   {
>>> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
>>> index a662b1b88408..efdd4627b774 100644
>>> --- a/drivers/cxl/cxl.h
>>> +++ b/drivers/cxl/cxl.h
>>> @@ -785,6 +785,9 @@ static inline void 
>>> cxl_dport_init_ras_reporting(struct cxl_dport *dport,
>>>   struct cxl_decoder *to_cxl_decoder(struct device *dev);
>>>   struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
>>>   struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
>>> +
>>> +#define CXLRD_DEV(cxlrd) (&(cxlrd)->cxlsd.cxld.dev)
>> ...oh, it's a macro now for some reason.
>>
>>> +
>>>   struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device 
>>> *dev);
>>>   bool is_root_decoder(struct device *dev);
>>>   bool is_switch_decoder(struct device *dev);
>>> diff --git a/include/cxl/cxl.h b/include/cxl/cxl.h
>>> index f7ce683465f0..4a8434a2b5da 100644
>>> --- a/include/cxl/cxl.h
>>> +++ b/include/cxl/cxl.h
>>> @@ -6,6 +6,10 @@
>>>     #include <linux/ioport.h>
>>>   +#define CXL_DECODER_F_RAM   BIT(0)
>>> +#define CXL_DECODER_F_PMEM  BIT(1)
>>> +#define CXL_DECODER_F_TYPE2 BIT(2)
>>> +
>>>   enum cxl_resource {
>>>       CXL_RES_DPA,
>>>       CXL_RES_RAM,
>>> @@ -50,4 +54,8 @@ int cxl_release_resource(struct cxl_dev_state 
>>> *cxlds, enum cxl_resource type);
>>>   void cxl_set_media_ready(struct cxl_dev_state *cxlds);
>>>   struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
>>>                          struct cxl_dev_state *cxlds);
>>> +struct cxl_port;
>>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev 
>>> *cxlmd,
>>> +                           unsigned long flags,
>>> +                           resource_size_t *max);
>> The name does not track for me, because nothing is acquired in this
>> function. It just surveys for a root decoder that meets the constraints.
>> It is possible that by the time the caller turns around to use that
>> freespace something else already grabbed it.
>
>
> I'll think in a better name.
>
> Thanks!
>
>
Dan Williams Jan. 21, 2025, 11:35 p.m. UTC | #6
Alejandro Lucero Palau wrote:
> 
> On 1/18/25 03:02, Dan Williams wrote:
> > alejandro.lucero-palau@ wrote:
> >> From: Alejandro Lucero <alucerop@amd.com>
> >>
> >> CXL region creation involves allocating capacity from device DPA
> >> (device-physical-address space) and assigning it to decode a given HPA
> >> (host-physical-address space). Before determining how much DPA to
> >> allocate the amount of available HPA must be determined. Also, not all
> >> HPA is created equal, some specifically targets RAM, some target PMEM,
> >> some is prepared for device-memory flows like HDM-D and HDM-DB, and some
> >> is host-only (HDM-H).
> >>
> >> Wrap all of those concerns into an API that retrieves a root decoder
> >> (platform CXL window) that fits the specified constraints and the
> >> capacity available for a new region.
> >>
> >> Based on https://lore.kernel.org/linux-cxl/168592159290.1948938.13522227102445462976.stgit@dwillia2-xfh.jf.intel.com/
[..]
> >> +/**
> >> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints
> >> + * @cxlmd: the CXL memory device with an endpoint that is mapped by the returned
> >> + *	    decoder
> >> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B]
> >> + * @max_avail_contig: output parameter of max contiguous bytes available in the
> >> + *		      returned decoder
> >> + *
> >> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available given
> >> + * in (@max_avail_contig))' is a point in time snapshot. If by the time the
> >> + * caller goes to use this root decoder's capacity the capacity is reduced then
> >> + * caller needs to loop and retry.
> >> + *
> >> + * The returned root decoder has an elevated reference count that needs to be
> >> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with
> >> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder
> >> + * does not race.
> >> + */
> >> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
> >> +					       unsigned long flags,
> >> +					       resource_size_t *max_avail_contig)
> > I don't understand the rationale throwing away the ability to search
> > root decoders by additional constraints.
> 
> Not sure I follow you here. I think the constraints, set by the caller, 
> is something to check for sure.

This is the original proposal:

struct cxl_root_decoder *cxl_hpa_freespace(struct cxl_port *endpoint,
					   struct device *const *host_bridges,
					   int interleave_ways,
					   unsigned long flags,
					   resource_size_t *max)

...it includes support for selecting a CXL window that might be
interleaved. The support for checking interleaved vs non-interleaved
windows is trivial. The support for PMEM region assembly requires
interleaved capacity search even if accelerators do not, so it is worth
including that small incremental support from day one.
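
For illustration only, a non-interleaved accelerator caller of that richer
interface might look roughly like this (variable names are hypothetical):

  struct device *host_bridges[] = { cxlmd->endpoint->host_bridge };
  resource_size_t avail;
  struct cxl_root_decoder *cxlrd;

  cxlrd = cxl_hpa_freespace(cxlmd->endpoint, host_bridges,
                            1 /* interleave_ways */,
                            CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2,
                            &avail);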
Dan Williams Jan. 21, 2025, 11:44 p.m. UTC | #7
Alejandro Lucero Palau wrote:
[..]
> >> So, I am not sure this code path has ever been tested as lockdep should
> >> complain about the double acquisition.
> >
> >
> > Oddly enough, it has been tested with two different drivers and with 
> > the kernel configuring lockdep.
> >
> > It is worth to investigate ...
> >
> 
> Confirmed the double lock is not an issue. Maybe the code hidden in 
> those macros is checking if the current caller is the same one that the 
> current owner of the lock. I will check that or investigate further.

Are you sure?

This splat:

 ============================================
 WARNING: possible recursive locking detected
 6.13.0-rc2+ #68 Tainted: G           OE     
 --------------------------------------------
 cat/1212 is trying to acquire lock:
 ffffffffc0591cf0 (cxl_region_rwsem){++++}-{4:4}, at: decoders_committed_show+0x2a/0x90 [cxl_core]
 
 but task is already holding lock:
 ffffffffc0591cf0 (cxl_region_rwsem){++++}-{4:4}, at: decoders_committed_show+0x1e/0x90 [cxl_core]
 
 other info that might help us debug this:
  Possible unsafe locking scenario:
 
        CPU0
        ----
   lock(cxl_region_rwsem);
   lock(cxl_region_rwsem);
 
  *** DEADLOCK ***


...results from this change:

diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 72950f631d49..9ebe9d46422b 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -560,9 +560,11 @@ static ssize_t decoders_committed_show(struct device *dev,
        struct cxl_port *port = to_cxl_port(dev);
        int rc;
 
+       down_read(&cxl_region_rwsem);
        down_read(&cxl_region_rwsem);
        rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
        up_read(&cxl_region_rwsem);
+       up_read(&cxl_region_rwsem);
 
        return rc;
 }

...and "cat /sys/bus/cxl/devices/port*/decoders_committed".
Alejandro Lucero Palau Jan. 22, 2025, 9:26 a.m. UTC | #8
On 1/21/25 23:44, Dan Williams wrote:
> Alejandro Lucero Palau wrote:
> [..]
>>>> So, I am not sure this code path has ever been tested as lockdep should
>>>> complain about the double acquisition.
>>>
>>> Oddly enough, it has been tested with two different drivers and with
>>> the kernel configuring lockdep.
>>>
>>> It is worth to investigate ...
>>>
>> Confirmed the double lock is not an issue. Maybe the code hidden in
>> those macros is checking if the current caller is the same one that the
>> current owner of the lock. I will check that or investigate further.
> Are you sure?


I'm sure it does not seem to be a problem ... with only
CONFIG_LOCKDEP_SUPPORT=y, which was what I saw in a quick search of the
kernel config file.

But it triggers as expected if the right configuration is enabled under
Kernel hacking -> Lock Debugging -> *.
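
For reference, a rough sketch of the options involved (an assumption about
the relevant Kconfig symbols, not verified against this exact tree):

  # arch support only; does not by itself enable any checking
  CONFIG_LOCKDEP_SUPPORT=y
  # lockdep proper (Kernel hacking -> Lock Debugging), reports recursive locking
  CONFIG_PROVE_LOCKING=y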


Moreover, my comment yesterday about checking current vs owner does not 
make sense since it is one of the reasons to check ...


Happy you spotted it. As I said, I think no special lock is needed for 
the following code, but I'll double check before v10.

Thanks!


> This splat:
>
>   ============================================
>   WARNING: possible recursive locking detected
>   6.13.0-rc2+ #68 Tainted: G           OE
>   --------------------------------------------
>   cat/1212 is trying to acquire lock:
>   ffffffffc0591cf0 (cxl_region_rwsem){++++}-{4:4}, at: decoders_committed_show+0x2a/0x90 [cxl_core]
>   
>   but task is already holding lock:
>   ffffffffc0591cf0 (cxl_region_rwsem){++++}-{4:4}, at: decoders_committed_show+0x1e/0x90 [cxl_core]
>   
>   other info that might help us debug this:
>    Possible unsafe locking scenario:
>   
>          CPU0
>          ----
>     lock(cxl_region_rwsem);
>     lock(cxl_region_rwsem);
>   
>    *** DEADLOCK ***
>
>
> ...results from this change:
>
> diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
> index 72950f631d49..9ebe9d46422b 100644
> --- a/drivers/cxl/core/port.c
> +++ b/drivers/cxl/core/port.c
> @@ -560,9 +560,11 @@ static ssize_t decoders_committed_show(struct device *dev,
>          struct cxl_port *port = to_cxl_port(dev);
>          int rc;
>   
> +       down_read(&cxl_region_rwsem);
>          down_read(&cxl_region_rwsem);
>          rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
>          up_read(&cxl_region_rwsem);
> +       up_read(&cxl_region_rwsem);
>   
>          return rc;
>   }
>
> ...and "cat /sys/bus/cxl/devices/port*/decoders_committed".
diff mbox series

Patch

diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 967132b49832..239fe49bf6a6 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -687,6 +687,161 @@  static int free_hpa(struct cxl_region *cxlr)
 	return 0;
 }
 
+struct cxlrd_max_context {
+	struct device *host_bridge;
+	unsigned long flags;
+	resource_size_t max_hpa;
+	struct cxl_root_decoder *cxlrd;
+};
+
+static int find_max_hpa(struct device *dev, void *data)
+{
+	struct cxlrd_max_context *ctx = data;
+	struct cxl_switch_decoder *cxlsd;
+	struct cxl_root_decoder *cxlrd;
+	struct resource *res, *prev;
+	struct cxl_decoder *cxld;
+	resource_size_t max;
+
+	if (!is_root_decoder(dev))
+		return 0;
+
+	cxlrd = to_cxl_root_decoder(dev);
+	cxlsd = &cxlrd->cxlsd;
+	cxld = &cxlsd->cxld;
+	if ((cxld->flags & ctx->flags) != ctx->flags) {
+		dev_dbg(dev, "%s, flags not matching: %08lx vs %08lx\n",
+			__func__, cxld->flags, ctx->flags);
+		return 0;
+	}
+
+	/*
+	 * The CXL specs do not forbid an accelerator being part of an
+	 * interleaved HPA range, but it is unlikely and because it simplifies
+	 * the code, don't allow it.
+	 */
+	if (cxld->interleave_ways != 1) {
+		dev_dbg(dev, "interleave_ways not matching\n");
+		return 0;
+	}
+
+	guard(rwsem_read)(&cxl_region_rwsem);
+	if (ctx->host_bridge != cxlsd->target[0]->dport_dev) {
+		dev_dbg(dev, "host bridge does not match\n");
+		return 0;
+	}
+
+	/*
+	 * Walk the root decoder resource range relying on cxl_region_rwsem to
+	 * preclude sibling arrival/departure and find the largest free space
+	 * gap.
+	 */
+	lockdep_assert_held_read(&cxl_region_rwsem);
+	max = 0;
+	res = cxlrd->res->child;
+
+	/* With no resource child the whole parent resource is available */
+	if (!res)
+		max = resource_size(cxlrd->res);
+	else
+		max = 0;
+
+	for (prev = NULL; res; prev = res, res = res->sibling) {
+		struct resource *next = res->sibling;
+		resource_size_t free = 0;
+
+		/*
+		 * Sanity check for preventing arithmetic problems below as a
+		 * resource with size 0 could imply using the end field below
+		 * when set to unsigned zero - 1 or all f in hex.
+		 */
+		if (prev && !resource_size(prev))
+			continue;
+
+		if (!prev && res->start > cxlrd->res->start) {
+			free = res->start - cxlrd->res->start;
+			max = max(free, max);
+		}
+		if (prev && res->start > prev->end + 1) {
+			free = res->start - prev->end + 1;
+			max = max(free, max);
+		}
+		if (next && res->end + 1 < next->start) {
+			free = next->start - res->end + 1;
+			max = max(free, max);
+		}
+		if (!next && res->end + 1 < cxlrd->res->end + 1) {
+			free = cxlrd->res->end + 1 - res->end + 1;
+			max = max(free, max);
+		}
+	}
+
+	dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max);
+	if (max > ctx->max_hpa) {
+		if (ctx->cxlrd)
+			put_device(CXLRD_DEV(ctx->cxlrd));
+		get_device(CXLRD_DEV(cxlrd));
+		ctx->cxlrd = cxlrd;
+		ctx->max_hpa = max;
+		dev_dbg(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n",
+			&max);
+	}
+	return 0;
+}
+
+/**
+ * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints
+ * @cxlmd: the CXL memory device with an endpoint that is mapped by the returned
+ *	    decoder
+ * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B]
+ * @max_avail_contig: output parameter of max contiguous bytes available in the
+ *		      returned decoder
+ *
+ * The return tuple of a 'struct cxl_root_decoder' and 'bytes available given
+ * in (@max_avail_contig))' is a point in time snapshot. If by the time the
+ * caller goes to use this root decoder's capacity the capacity is reduced then
+ * caller needs to loop and retry.
+ *
+ * The returned root decoder has an elevated reference count that needs to be
+ * put with put_device(cxlrd_dev(cxlrd)). Locking context is with
+ * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder
+ * does not race.
+ */
+struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
+					       unsigned long flags,
+					       resource_size_t *max_avail_contig)
+{
+	struct cxl_port *endpoint = cxlmd->endpoint;
+	struct cxlrd_max_context ctx = {
+		.host_bridge = endpoint->host_bridge,
+		.flags = flags,
+	};
+	struct cxl_port *root_port;
+	struct cxl_root *root __free(put_cxl_root) = find_cxl_root(endpoint);
+
+	if (!is_cxl_endpoint(endpoint)) {
+		dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!root) {
+		dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
+		return ERR_PTR(-ENXIO);
+	}
+
+	root_port = &root->port;
+	down_read(&cxl_region_rwsem);
+	device_for_each_child(&root_port->dev, &ctx, find_max_hpa);
+	up_read(&cxl_region_rwsem);
+
+	if (!ctx.cxlrd)
+		return ERR_PTR(-ENOMEM);
+
+	*max_avail_contig = ctx.max_hpa;
+	return ctx.cxlrd;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, "CXL");
+
 static ssize_t size_store(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t len)
 {
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index a662b1b88408..efdd4627b774 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -785,6 +785,9 @@  static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport,
 struct cxl_decoder *to_cxl_decoder(struct device *dev);
 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
 struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
+
+#define CXLRD_DEV(cxlrd) (&(cxlrd)->cxlsd.cxld.dev)
+
 struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
 bool is_root_decoder(struct device *dev);
 bool is_switch_decoder(struct device *dev);
diff --git a/include/cxl/cxl.h b/include/cxl/cxl.h
index f7ce683465f0..4a8434a2b5da 100644
--- a/include/cxl/cxl.h
+++ b/include/cxl/cxl.h
@@ -6,6 +6,10 @@ 
 
 #include <linux/ioport.h>
 
+#define CXL_DECODER_F_RAM   BIT(0)
+#define CXL_DECODER_F_PMEM  BIT(1)
+#define CXL_DECODER_F_TYPE2 BIT(2)
+
 enum cxl_resource {
 	CXL_RES_DPA,
 	CXL_RES_RAM,
@@ -50,4 +54,8 @@  int cxl_release_resource(struct cxl_dev_state *cxlds, enum cxl_resource type);
 void cxl_set_media_ready(struct cxl_dev_state *cxlds);
 struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
 				       struct cxl_dev_state *cxlds);
+struct cxl_port;
+struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_memdev *cxlmd,
+					       unsigned long flags,
+					       resource_size_t *max);
 #endif