Message ID | 20240715172835.24757-10-alejandro.lucero-palau@amd.com
---|---
State | Superseded
Series | cxl: add Type2 device support
Hi,

kernel test robot noticed the following build warnings:

[auto build test WARNING on linus/master]
[also build test WARNING on v6.10 next-20240715]
[cannot apply to cxl/next cxl/pending horms-ipvs/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/alejandro-lucero-palau-amd-com/cxl-add-type2-device-basic-support/20240716-015920
base:   linus/master
patch link:    https://lore.kernel.org/r/20240715172835.24757-10-alejandro.lucero-palau%40amd.com
patch subject: [PATCH v2 09/15] cxl: define a driver interface for HPA free space enumaration
config: i386-buildonly-randconfig-004-20240716 (https://download.01.org/0day-ci/archive/20240716/202407160818.7GrterxM-lkp@intel.com/config)
compiler: clang version 18.1.5 (https://github.com/llvm/llvm-project 617a15a9eac96088ae5e9134248d8236e34b91b1)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240716/202407160818.7GrterxM-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202407160818.7GrterxM-lkp@intel.com/

All warnings (new ones prefixed by >>):

   In file included from drivers/net/ethernet/sfc/efx_cxl.c:17:
   drivers/net/ethernet/sfc/efx_cxl.h:11:9: warning: 'EFX_CXL_H' is used as a header guard here, followed by #define of a different macro [-Wheader-guard]
      11 | #ifndef EFX_CXL_H
         |         ^~~~~~~~~
   drivers/net/ethernet/sfc/efx_cxl.h:12:9: note: 'EFX_CLX_H' is defined here; did you mean 'EFX_CXL_H'?
      12 | #define EFX_CLX_H
         |         ^~~~~~~~~
         |         EFX_CXL_H
>> drivers/net/ethernet/sfc/efx_cxl.c:89:7: warning: format specifies type 'unsigned long long' but the argument has type 'resource_size_t' (aka 'unsigned int') [-Wformat]
      88 |         pci_info(pci_dev, "CXL accel not enough free HPA space %llu < %u\n",
         |                                                                 ~~~~
         |                                                                 %u
      89 |                  max, EFX_CTPIO_BUFFER_SIZE);
         |                  ^~~
   include/linux/pci.h:2683:67: note: expanded from macro 'pci_info'
    2683 | #define pci_info(pdev, fmt, arg...)     dev_info(&(pdev)->dev, fmt, ##arg)
         |                                                                ~~~    ^~~
   include/linux/dev_printk.h:160:67: note: expanded from macro 'dev_info'
     160 |         dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
         |                                                                  ~~~    ^~~~~~~~~~~
   include/linux/dev_printk.h:110:23: note: expanded from macro 'dev_printk_index_wrap'
     110 |                 _p_func(dev, fmt, ##__VA_ARGS__);                       \
         |                              ~~~  ^~~~~~~~~~~
   2 warnings generated.
vim +89 drivers/net/ethernet/sfc/efx_cxl.c

    15	
    16	#include "net_driver.h"
  > 17	#include "efx_cxl.h"
    18	
    19	#define EFX_CTPIO_BUFFER_SIZE	(1024*1024*256)
    20	
    21	void efx_cxl_init(struct efx_nic *efx)
    22	{
    23		struct pci_dev *pci_dev = efx->pci_dev;
    24		struct efx_cxl *cxl = efx->cxl;
    25		resource_size_t max = 0;
    26		struct resource res;
    27		u16 dvsec;
    28	
    29		dvsec = pci_find_dvsec_capability(pci_dev, PCI_VENDOR_ID_CXL,
    30						  CXL_DVSEC_PCIE_DEVICE);
    31	
    32		if (!dvsec)
    33			return;
    34	
    35		pci_info(pci_dev, "CXL CXL_DVSEC_PCIE_DEVICE capability found");
    36	
    37		cxl->cxlds = cxl_accel_state_create(&pci_dev->dev,
    38						    CXL_ACCEL_DRIVER_CAP_HDM);
    39		if (IS_ERR(cxl->cxlds)) {
    40			pci_info(pci_dev, "CXL accel device state failed");
    41			return;
    42		}
    43	
    44		cxl_accel_set_dvsec(cxl->cxlds, dvsec);
    45		cxl_accel_set_serial(cxl->cxlds, pci_dev->dev.id);
    46	
    47		res = DEFINE_RES_MEM(0, EFX_CTPIO_BUFFER_SIZE);
    48		cxl_accel_set_resource(cxl->cxlds, res, CXL_ACCEL_RES_DPA);
    49	
    50		res = DEFINE_RES_MEM_NAMED(0, EFX_CTPIO_BUFFER_SIZE, "ram");
    51		cxl_accel_set_resource(cxl->cxlds, res, CXL_ACCEL_RES_RAM);
    52	
    53		if (cxl_pci_accel_setup_regs(pci_dev, cxl->cxlds)) {
    54			pci_info(pci_dev, "CXL accel setup regs failed");
    55			return;
    56		}
    57	
    58		if (cxl_accel_request_resource(cxl->cxlds, true))
    59			pci_info(pci_dev, "CXL accel resource request failed");
    60	
    61		if (!cxl_await_media_ready(cxl->cxlds)) {
    62			cxl_accel_set_media_ready(cxl->cxlds);
    63		} else {
    64			pci_info(pci_dev, "CXL accel media not active");
    65			return;
    66		}
    67	
    68		cxl->cxlmd = devm_cxl_add_memdev(&pci_dev->dev, cxl->cxlds);
    69		if (IS_ERR(cxl->cxlmd)) {
    70			pci_info(pci_dev, "CXL accel memdev creation failed");
    71			return;
    72		}
    73	
    74		cxl->endpoint = cxl_acquire_endpoint(cxl->cxlmd);
    75		if (IS_ERR(cxl->endpoint))
    76			pci_info(pci_dev, "CXL accel acquire endpoint failed");
    77	
    78		cxl->cxlrd = cxl_get_hpa_freespace(cxl->endpoint, 1,
    79						   CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2,
    80						   &max);
    81	
    82		if (IS_ERR(cxl->cxlrd)) {
    83			pci_info(pci_dev, "CXL accel get HPA failed");
    84			goto out;
    85		}
    86	
    87		if (max < EFX_CTPIO_BUFFER_SIZE)
    88			pci_info(pci_dev, "CXL accel not enough free HPA space %llu < %u\n",
  > 89			         max, EFX_CTPIO_BUFFER_SIZE);
    90	out:
    91		cxl_release_endpoint(cxl->cxlmd, cxl->endpoint);
    92	}
    93	
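Both warnings point at small, self-contained fixes. A minimal sketch of what they could look like (not taken from a posted revision): the header guard only needs the #define to match the #ifndef, and since resource_size_t is just 32 bits wide in this i386 config, the usual kernel idiom is to print it with the %pa specifier and a pointer argument, as find_max_hpa() in this same series already does:

	/* drivers/net/ethernet/sfc/efx_cxl.h: the #define must match the guard macro */
	#ifndef EFX_CXL_H
	#define EFX_CXL_H
	/* declarations */
	#endif	/* EFX_CXL_H */

	/* drivers/net/ethernet/sfc/efx_cxl.c: %pa prints a resource_size_t portably */
	if (max < EFX_CTPIO_BUFFER_SIZE)
		pci_info(pci_dev, "CXL accel not enough free HPA space %pa < %u\n",
			 &max, EFX_CTPIO_BUFFER_SIZE);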
On 7/16/2024 1:28 AM, alejandro.lucero-palau@amd.com wrote: > From: Alejandro Lucero <alucerop@amd.com> > > CXL region creation involves allocating capacity from device DPA > (device-physical-address space) and assigning it to decode a given HPA > (host-physical-address space). Before determining how much DPA to > allocate the amount of available HPA must be determined. Also, not all > HPA is create equal, some specifically targets RAM, some target PMEM, > some is prepared for device-memory flows like HDM-D and HDM-DB, and some > is host-only (HDM-H). > > Wrap all of those concerns into an API that retrieves a root decoder > (platform CXL window) that fits the specified constraints and the > capacity available for a new region. > > Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f > > Signed-off-by: Alejandro Lucero <alucerop@amd.com> > Co-developed-by: Dan Williams <dan.j.williams@intel.com> > --- > drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++ > drivers/cxl/cxl.h | 3 + > drivers/cxl/cxlmem.h | 5 + > drivers/net/ethernet/sfc/efx_cxl.c | 14 +++ > include/linux/cxl_accel_mem.h | 9 ++ > 5 files changed, 192 insertions(+) > > diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c > index 538ebd5a64fd..ca464bfef77b 100644 > --- a/drivers/cxl/core/region.c > +++ b/drivers/cxl/core/region.c > @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) > return 0; > } > > + > +struct cxlrd_max_context { > + struct device * const *host_bridges; > + int interleave_ways; > + unsigned long flags; > + resource_size_t max_hpa; > + struct cxl_root_decoder *cxlrd; > +}; > + > +static int find_max_hpa(struct device *dev, void *data) > +{ > + struct cxlrd_max_context *ctx = data; > + struct cxl_switch_decoder *cxlsd; > + struct cxl_root_decoder *cxlrd; > + struct resource *res, *prev; > + struct cxl_decoder *cxld; > + resource_size_t max; > + int found; > + > + if (!is_root_decoder(dev)) > + return 0; > + > + cxlrd = to_cxl_root_decoder(dev); > + cxld = &cxlrd->cxlsd.cxld; > + if ((cxld->flags & ctx->flags) != ctx->flags) { > + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", > + cxld->flags, ctx->flags); > + return 0; > + } > + > + /* A Host bridge could have more interleave ways than an > + * endpoint, couldn´t it? > + * > + * What does interleave ways mean here in terms of the requestor? > + * Why the FFMWS has 0 interleave ways but root port has 1? > + */ > + if (cxld->interleave_ways != ctx->interleave_ways) { > + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n"); > + return 0; > + } > + > + cxlsd = &cxlrd->cxlsd; > + > + guard(rwsem_read)(&cxl_region_rwsem); > + found = 0; > + for (int i = 0; i < ctx->interleave_ways; i++) > + for (int j = 0; j < ctx->interleave_ways; j++) > + if (ctx->host_bridges[i] == > + cxlsd->target[j]->dport_dev) { > + found++; > + break; > + } > + > + if (found != ctx->interleave_ways) { > + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n"); > + return 0; > + } > + > + /* > + * Walk the root decoder resource range relying on cxl_region_rwsem to > + * preclude sibling arrival/departure and find the largest free space > + * gap. 
> + */ > + lockdep_assert_held_read(&cxl_region_rwsem); > + max = 0; > + res = cxlrd->res->child; > + if (!res) > + max = resource_size(cxlrd->res); > + else > + max = 0; > + > + for (prev = NULL; res; prev = res, res = res->sibling) { > + struct resource *next = res->sibling; > + resource_size_t free = 0; > + > + if (!prev && res->start > cxlrd->res->start) { > + free = res->start - cxlrd->res->start; > + max = max(free, max); > + } > + if (prev && res->start > prev->end + 1) { > + free = res->start - prev->end + 1; > + max = max(free, max); > + } > + if (next && res->end + 1 < next->start) { > + free = next->start - res->end + 1; > + max = max(free, max); > + } > + if (!next && res->end + 1 < cxlrd->res->end + 1) { > + free = cxlrd->res->end + 1 - res->end + 1; > + max = max(free, max); > + } > + } > + > + if (max > ctx->max_hpa) { > + if (ctx->cxlrd) > + put_device(CXLRD_DEV(ctx->cxlrd)); > + get_device(CXLRD_DEV(cxlrd)); > + ctx->cxlrd = cxlrd; > + ctx->max_hpa = max; > + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max); > + } > + return 0; > +} > + > +/** > + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints > + * @endpoint: an endpoint that is mapped by the returned decoder > + * @interleave_ways: number of entries in @host_bridges > + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] > + * @max: output parameter of bytes available in the returned decoder > + * > + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' > + * is a point in time snapshot. If by the time the caller goes to use this root > + * decoder's capacity the capacity is reduced then caller needs to loop and > + * retry. > + * > + * The returned root decoder has an elevated reference count that needs to be > + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with > + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder > + * does not race. > + */ > +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, > + int interleave_ways, > + unsigned long flags, > + resource_size_t *max) > +{ > + > + struct cxlrd_max_context ctx = { > + .host_bridges = &endpoint->host_bridge, > + .interleave_ways = interleave_ways, > + .flags = flags, > + }; > + struct cxl_port *root_port; > + struct cxl_root *root; > + > + if (!is_cxl_endpoint(endpoint)) { > + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); > + return ERR_PTR(-EINVAL); > + } > + > + root = find_cxl_root(endpoint); Could use scope-based resource management __free() here to drop below put_device(&root_port->dev); e.g. 
struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(endpoint); > + if (!root) { > + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); > + return ERR_PTR(-ENXIO); > + } > + > + root_port = &root->port; > + down_read(&cxl_region_rwsem); > + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); > + up_read(&cxl_region_rwsem); > + put_device(&root_port->dev); > + > + if (!ctx.cxlrd) > + return ERR_PTR(-ENOMEM); > + > + *max = ctx.max_hpa; > + return ctx.cxlrd; > +} > +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); > + > + > static ssize_t size_store(struct device *dev, struct device_attribute *attr, > const char *buf, size_t len) > { > diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h > index 9973430d975f..d3fdd2c1e066 100644 > --- a/drivers/cxl/cxl.h > +++ b/drivers/cxl/cxl.h > @@ -770,6 +770,9 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev); > struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); > struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); > struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); > + > +#define CXLRD_DEV(cxlrd) &cxlrd->cxlsd.cxld.dev > + > bool is_root_decoder(struct device *dev); > bool is_switch_decoder(struct device *dev); > bool is_endpoint_decoder(struct device *dev); > diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h > index 8f2a820bd92d..a0e0795ec064 100644 > --- a/drivers/cxl/cxlmem.h > +++ b/drivers/cxl/cxlmem.h > @@ -877,4 +877,9 @@ struct cxl_hdm { > struct seq_file; > struct dentry *cxl_debugfs_create_dir(const char *dir); > void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds); > +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, > + int interleave_ways, > + unsigned long flags, > + resource_size_t *max); > + > #endif /* __CXL_MEM_H__ */ > diff --git a/drivers/net/ethernet/sfc/efx_cxl.c b/drivers/net/ethernet/sfc/efx_cxl.c > index 2cf4837ddfc1..6d49571ccff7 100644 > --- a/drivers/net/ethernet/sfc/efx_cxl.c > +++ b/drivers/net/ethernet/sfc/efx_cxl.c > @@ -22,6 +22,7 @@ void efx_cxl_init(struct efx_nic *efx) > { > struct pci_dev *pci_dev = efx->pci_dev; > struct efx_cxl *cxl = efx->cxl; > + resource_size_t max = 0; > struct resource res; > u16 dvsec; > > @@ -74,6 +75,19 @@ void efx_cxl_init(struct efx_nic *efx) > if (IS_ERR(cxl->endpoint)) > pci_info(pci_dev, "CXL accel acquire endpoint failed"); > > + cxl->cxlrd = cxl_get_hpa_freespace(cxl->endpoint, 1, > + CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2, > + &max); > + > + if (IS_ERR(cxl->cxlrd)) { > + pci_info(pci_dev, "CXL accel get HPA failed"); > + goto out; > + } > + > + if (max < EFX_CTPIO_BUFFER_SIZE) > + pci_info(pci_dev, "CXL accel not enough free HPA space %llu < %u\n", > + max, EFX_CTPIO_BUFFER_SIZE); > +out: > cxl_release_endpoint(cxl->cxlmd, cxl->endpoint); > } > > diff --git a/include/linux/cxl_accel_mem.h b/include/linux/cxl_accel_mem.h > index 701910021df8..f3e77688ffe0 100644 > --- a/include/linux/cxl_accel_mem.h > +++ b/include/linux/cxl_accel_mem.h > @@ -6,6 +6,10 @@ > #ifndef __CXL_ACCEL_MEM_H > #define __CXL_ACCEL_MEM_H > > +#define CXL_DECODER_F_RAM BIT(0) > +#define CXL_DECODER_F_PMEM BIT(1) > +#define CXL_DECODER_F_TYPE2 BIT(2) > + > enum accel_resource{ > CXL_ACCEL_RES_DPA, > CXL_ACCEL_RES_RAM, > @@ -32,4 +36,9 @@ struct cxl_memdev *devm_cxl_add_memdev(struct device *host, > > struct cxl_port *cxl_acquire_endpoint(struct cxl_memdev *cxlmd); > void cxl_release_endpoint(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); > + > +struct 
cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, > + int interleave_ways, > + unsigned long flags, > + resource_size_t *max); > #endif
On 7/16/24 07:06, Li, Ming4 wrote: > On 7/16/2024 1:28 AM, alejandro.lucero-palau@amd.com wrote: >> From: Alejandro Lucero <alucerop@amd.com> >> >> CXL region creation involves allocating capacity from device DPA >> (device-physical-address space) and assigning it to decode a given HPA >> (host-physical-address space). Before determining how much DPA to >> allocate the amount of available HPA must be determined. Also, not all >> HPA is create equal, some specifically targets RAM, some target PMEM, >> some is prepared for device-memory flows like HDM-D and HDM-DB, and some >> is host-only (HDM-H). >> >> Wrap all of those concerns into an API that retrieves a root decoder >> (platform CXL window) that fits the specified constraints and the >> capacity available for a new region. >> >> Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f >> >> Signed-off-by: Alejandro Lucero <alucerop@amd.com> >> Co-developed-by: Dan Williams <dan.j.williams@intel.com> >> --- >> drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++ >> drivers/cxl/cxl.h | 3 + >> drivers/cxl/cxlmem.h | 5 + >> drivers/net/ethernet/sfc/efx_cxl.c | 14 +++ >> include/linux/cxl_accel_mem.h | 9 ++ >> 5 files changed, 192 insertions(+) >> >> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c >> index 538ebd5a64fd..ca464bfef77b 100644 >> --- a/drivers/cxl/core/region.c >> +++ b/drivers/cxl/core/region.c >> @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) >> return 0; >> } >> >> + >> +struct cxlrd_max_context { >> + struct device * const *host_bridges; >> + int interleave_ways; >> + unsigned long flags; >> + resource_size_t max_hpa; >> + struct cxl_root_decoder *cxlrd; >> +}; >> + >> +static int find_max_hpa(struct device *dev, void *data) >> +{ >> + struct cxlrd_max_context *ctx = data; >> + struct cxl_switch_decoder *cxlsd; >> + struct cxl_root_decoder *cxlrd; >> + struct resource *res, *prev; >> + struct cxl_decoder *cxld; >> + resource_size_t max; >> + int found; >> + >> + if (!is_root_decoder(dev)) >> + return 0; >> + >> + cxlrd = to_cxl_root_decoder(dev); >> + cxld = &cxlrd->cxlsd.cxld; >> + if ((cxld->flags & ctx->flags) != ctx->flags) { >> + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", >> + cxld->flags, ctx->flags); >> + return 0; >> + } >> + >> + /* A Host bridge could have more interleave ways than an >> + * endpoint, couldn´t it? >> + * >> + * What does interleave ways mean here in terms of the requestor? >> + * Why the FFMWS has 0 interleave ways but root port has 1? >> + */ >> + if (cxld->interleave_ways != ctx->interleave_ways) { >> + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n"); >> + return 0; >> + } >> + >> + cxlsd = &cxlrd->cxlsd; >> + >> + guard(rwsem_read)(&cxl_region_rwsem); >> + found = 0; >> + for (int i = 0; i < ctx->interleave_ways; i++) >> + for (int j = 0; j < ctx->interleave_ways; j++) >> + if (ctx->host_bridges[i] == >> + cxlsd->target[j]->dport_dev) { >> + found++; >> + break; >> + } >> + >> + if (found != ctx->interleave_ways) { >> + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n"); >> + return 0; >> + } >> + >> + /* >> + * Walk the root decoder resource range relying on cxl_region_rwsem to >> + * preclude sibling arrival/departure and find the largest free space >> + * gap. 
>> + */ >> + lockdep_assert_held_read(&cxl_region_rwsem); >> + max = 0; >> + res = cxlrd->res->child; >> + if (!res) >> + max = resource_size(cxlrd->res); >> + else >> + max = 0; >> + >> + for (prev = NULL; res; prev = res, res = res->sibling) { >> + struct resource *next = res->sibling; >> + resource_size_t free = 0; >> + >> + if (!prev && res->start > cxlrd->res->start) { >> + free = res->start - cxlrd->res->start; >> + max = max(free, max); >> + } >> + if (prev && res->start > prev->end + 1) { >> + free = res->start - prev->end + 1; >> + max = max(free, max); >> + } >> + if (next && res->end + 1 < next->start) { >> + free = next->start - res->end + 1; >> + max = max(free, max); >> + } >> + if (!next && res->end + 1 < cxlrd->res->end + 1) { >> + free = cxlrd->res->end + 1 - res->end + 1; >> + max = max(free, max); >> + } >> + } >> + >> + if (max > ctx->max_hpa) { >> + if (ctx->cxlrd) >> + put_device(CXLRD_DEV(ctx->cxlrd)); >> + get_device(CXLRD_DEV(cxlrd)); >> + ctx->cxlrd = cxlrd; >> + ctx->max_hpa = max; >> + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max); >> + } >> + return 0; >> +} >> + >> +/** >> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints >> + * @endpoint: an endpoint that is mapped by the returned decoder >> + * @interleave_ways: number of entries in @host_bridges >> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] >> + * @max: output parameter of bytes available in the returned decoder >> + * >> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' >> + * is a point in time snapshot. If by the time the caller goes to use this root >> + * decoder's capacity the capacity is reduced then caller needs to loop and >> + * retry. >> + * >> + * The returned root decoder has an elevated reference count that needs to be >> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with >> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder >> + * does not race. >> + */ >> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >> + int interleave_ways, >> + unsigned long flags, >> + resource_size_t *max) >> +{ >> + >> + struct cxlrd_max_context ctx = { >> + .host_bridges = &endpoint->host_bridge, >> + .interleave_ways = interleave_ways, >> + .flags = flags, >> + }; >> + struct cxl_port *root_port; >> + struct cxl_root *root; >> + >> + if (!is_cxl_endpoint(endpoint)) { >> + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); >> + return ERR_PTR(-EINVAL); >> + } >> + >> + root = find_cxl_root(endpoint); > Could use scope-based resource management __free() here to drop below put_device(&root_port->dev); > > e.g. struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(endpoint); > I need to admit not familiar yet with scope-based macros, but I think these are different things. The scope of the pointer is inside this function, but the data referenced is likely to persist. get_device, inside find_cxl_root, is needed to avoid the device-related data disappearing while referenced by the code inside this function, and at the time of put_device, the data will be freed if ref counter reaches 0. Am I missing something? 
>> + if (!root) { >> + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); >> + return ERR_PTR(-ENXIO); >> + } >> + >> + root_port = &root->port; >> + down_read(&cxl_region_rwsem); >> + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); >> + up_read(&cxl_region_rwsem); >> + put_device(&root_port->dev); >> + >> + if (!ctx.cxlrd) >> + return ERR_PTR(-ENOMEM); >> + >> + *max = ctx.max_hpa; >> + return ctx.cxlrd; >> +} >> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); >> + >> + >> static ssize_t size_store(struct device *dev, struct device_attribute *attr, >> const char *buf, size_t len) >> { >> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h >> index 9973430d975f..d3fdd2c1e066 100644 >> --- a/drivers/cxl/cxl.h >> +++ b/drivers/cxl/cxl.h >> @@ -770,6 +770,9 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev); >> struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); >> struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); >> struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); >> + >> +#define CXLRD_DEV(cxlrd) &cxlrd->cxlsd.cxld.dev >> + >> bool is_root_decoder(struct device *dev); >> bool is_switch_decoder(struct device *dev); >> bool is_endpoint_decoder(struct device *dev); >> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h >> index 8f2a820bd92d..a0e0795ec064 100644 >> --- a/drivers/cxl/cxlmem.h >> +++ b/drivers/cxl/cxlmem.h >> @@ -877,4 +877,9 @@ struct cxl_hdm { >> struct seq_file; >> struct dentry *cxl_debugfs_create_dir(const char *dir); >> void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds); >> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >> + int interleave_ways, >> + unsigned long flags, >> + resource_size_t *max); >> + >> #endif /* __CXL_MEM_H__ */ >> diff --git a/drivers/net/ethernet/sfc/efx_cxl.c b/drivers/net/ethernet/sfc/efx_cxl.c >> index 2cf4837ddfc1..6d49571ccff7 100644 >> --- a/drivers/net/ethernet/sfc/efx_cxl.c >> +++ b/drivers/net/ethernet/sfc/efx_cxl.c >> @@ -22,6 +22,7 @@ void efx_cxl_init(struct efx_nic *efx) >> { >> struct pci_dev *pci_dev = efx->pci_dev; >> struct efx_cxl *cxl = efx->cxl; >> + resource_size_t max = 0; >> struct resource res; >> u16 dvsec; >> >> @@ -74,6 +75,19 @@ void efx_cxl_init(struct efx_nic *efx) >> if (IS_ERR(cxl->endpoint)) >> pci_info(pci_dev, "CXL accel acquire endpoint failed"); >> >> + cxl->cxlrd = cxl_get_hpa_freespace(cxl->endpoint, 1, >> + CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2, >> + &max); >> + >> + if (IS_ERR(cxl->cxlrd)) { >> + pci_info(pci_dev, "CXL accel get HPA failed"); >> + goto out; >> + } >> + >> + if (max < EFX_CTPIO_BUFFER_SIZE) >> + pci_info(pci_dev, "CXL accel not enough free HPA space %llu < %u\n", >> + max, EFX_CTPIO_BUFFER_SIZE); >> +out: >> cxl_release_endpoint(cxl->cxlmd, cxl->endpoint); >> } >> >> diff --git a/include/linux/cxl_accel_mem.h b/include/linux/cxl_accel_mem.h >> index 701910021df8..f3e77688ffe0 100644 >> --- a/include/linux/cxl_accel_mem.h >> +++ b/include/linux/cxl_accel_mem.h >> @@ -6,6 +6,10 @@ >> #ifndef __CXL_ACCEL_MEM_H >> #define __CXL_ACCEL_MEM_H >> >> +#define CXL_DECODER_F_RAM BIT(0) >> +#define CXL_DECODER_F_PMEM BIT(1) >> +#define CXL_DECODER_F_TYPE2 BIT(2) >> + >> enum accel_resource{ >> CXL_ACCEL_RES_DPA, >> CXL_ACCEL_RES_RAM, >> @@ -32,4 +36,9 @@ struct cxl_memdev *devm_cxl_add_memdev(struct device *host, >> >> struct cxl_port *cxl_acquire_endpoint(struct cxl_memdev *cxlmd); >> void cxl_release_endpoint(struct cxl_memdev *cxlmd, struct cxl_port 
*endpoint); >> + >> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >> + int interleave_ways, >> + unsigned long flags, >> + resource_size_t *max); >> #endif >
On 7/24/2024 4:24 PM, Alejandro Lucero Palau wrote: > > On 7/16/24 07:06, Li, Ming4 wrote: >> On 7/16/2024 1:28 AM, alejandro.lucero-palau@amd.com wrote: >>> From: Alejandro Lucero <alucerop@amd.com> >>> >>> CXL region creation involves allocating capacity from device DPA >>> (device-physical-address space) and assigning it to decode a given HPA >>> (host-physical-address space). Before determining how much DPA to >>> allocate the amount of available HPA must be determined. Also, not all >>> HPA is create equal, some specifically targets RAM, some target PMEM, >>> some is prepared for device-memory flows like HDM-D and HDM-DB, and some >>> is host-only (HDM-H). >>> >>> Wrap all of those concerns into an API that retrieves a root decoder >>> (platform CXL window) that fits the specified constraints and the >>> capacity available for a new region. >>> >>> Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f >>> >>> Signed-off-by: Alejandro Lucero <alucerop@amd.com> >>> Co-developed-by: Dan Williams <dan.j.williams@intel.com> >>> --- >>> drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++ >>> drivers/cxl/cxl.h | 3 + >>> drivers/cxl/cxlmem.h | 5 + >>> drivers/net/ethernet/sfc/efx_cxl.c | 14 +++ >>> include/linux/cxl_accel_mem.h | 9 ++ >>> 5 files changed, 192 insertions(+) >>> >>> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c >>> index 538ebd5a64fd..ca464bfef77b 100644 >>> --- a/drivers/cxl/core/region.c >>> +++ b/drivers/cxl/core/region.c >>> @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) >>> return 0; >>> } >>> + >>> +struct cxlrd_max_context { >>> + struct device * const *host_bridges; >>> + int interleave_ways; >>> + unsigned long flags; >>> + resource_size_t max_hpa; >>> + struct cxl_root_decoder *cxlrd; >>> +}; >>> + >>> +static int find_max_hpa(struct device *dev, void *data) >>> +{ >>> + struct cxlrd_max_context *ctx = data; >>> + struct cxl_switch_decoder *cxlsd; >>> + struct cxl_root_decoder *cxlrd; >>> + struct resource *res, *prev; >>> + struct cxl_decoder *cxld; >>> + resource_size_t max; >>> + int found; >>> + >>> + if (!is_root_decoder(dev)) >>> + return 0; >>> + >>> + cxlrd = to_cxl_root_decoder(dev); >>> + cxld = &cxlrd->cxlsd.cxld; >>> + if ((cxld->flags & ctx->flags) != ctx->flags) { >>> + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", >>> + cxld->flags, ctx->flags); >>> + return 0; >>> + } >>> + >>> + /* A Host bridge could have more interleave ways than an >>> + * endpoint, couldn´t it? >>> + * >>> + * What does interleave ways mean here in terms of the requestor? >>> + * Why the FFMWS has 0 interleave ways but root port has 1? 
>>> + */ >>> + if (cxld->interleave_ways != ctx->interleave_ways) { >>> + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n"); >>> + return 0; >>> + } >>> + >>> + cxlsd = &cxlrd->cxlsd; >>> + >>> + guard(rwsem_read)(&cxl_region_rwsem); >>> + found = 0; >>> + for (int i = 0; i < ctx->interleave_ways; i++) >>> + for (int j = 0; j < ctx->interleave_ways; j++) >>> + if (ctx->host_bridges[i] == >>> + cxlsd->target[j]->dport_dev) { >>> + found++; >>> + break; >>> + } >>> + >>> + if (found != ctx->interleave_ways) { >>> + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n"); >>> + return 0; >>> + } >>> + >>> + /* >>> + * Walk the root decoder resource range relying on cxl_region_rwsem to >>> + * preclude sibling arrival/departure and find the largest free space >>> + * gap. >>> + */ >>> + lockdep_assert_held_read(&cxl_region_rwsem); >>> + max = 0; >>> + res = cxlrd->res->child; >>> + if (!res) >>> + max = resource_size(cxlrd->res); >>> + else >>> + max = 0; >>> + >>> + for (prev = NULL; res; prev = res, res = res->sibling) { >>> + struct resource *next = res->sibling; >>> + resource_size_t free = 0; >>> + >>> + if (!prev && res->start > cxlrd->res->start) { >>> + free = res->start - cxlrd->res->start; >>> + max = max(free, max); >>> + } >>> + if (prev && res->start > prev->end + 1) { >>> + free = res->start - prev->end + 1; >>> + max = max(free, max); >>> + } >>> + if (next && res->end + 1 < next->start) { >>> + free = next->start - res->end + 1; >>> + max = max(free, max); >>> + } >>> + if (!next && res->end + 1 < cxlrd->res->end + 1) { >>> + free = cxlrd->res->end + 1 - res->end + 1; >>> + max = max(free, max); >>> + } >>> + } >>> + >>> + if (max > ctx->max_hpa) { >>> + if (ctx->cxlrd) >>> + put_device(CXLRD_DEV(ctx->cxlrd)); >>> + get_device(CXLRD_DEV(cxlrd)); >>> + ctx->cxlrd = cxlrd; >>> + ctx->max_hpa = max; >>> + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max); >>> + } >>> + return 0; >>> +} >>> + >>> +/** >>> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints >>> + * @endpoint: an endpoint that is mapped by the returned decoder >>> + * @interleave_ways: number of entries in @host_bridges >>> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] >>> + * @max: output parameter of bytes available in the returned decoder >>> + * >>> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' >>> + * is a point in time snapshot. If by the time the caller goes to use this root >>> + * decoder's capacity the capacity is reduced then caller needs to loop and >>> + * retry. >>> + * >>> + * The returned root decoder has an elevated reference count that needs to be >>> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with >>> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder >>> + * does not race. 
>>> + */ >>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >>> + int interleave_ways, >>> + unsigned long flags, >>> + resource_size_t *max) >>> +{ >>> + >>> + struct cxlrd_max_context ctx = { >>> + .host_bridges = &endpoint->host_bridge, >>> + .interleave_ways = interleave_ways, >>> + .flags = flags, >>> + }; >>> + struct cxl_port *root_port; >>> + struct cxl_root *root; >>> + >>> + if (!is_cxl_endpoint(endpoint)) { >>> + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); >>> + return ERR_PTR(-EINVAL); >>> + } >>> + >>> + root = find_cxl_root(endpoint); >> Could use scope-based resource management __free() here to drop below put_device(&root_port->dev); >> >> e.g. struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(endpoint); >> > > I need to admit not familiar yet with scope-based macros, but I think these are different things. The scope of the pointer is inside this function, but the data referenced is likely to persist. > > > get_device, inside find_cxl_root, is needed to avoid the device-related data disappearing while referenced by the code inside this function, and at the time of put_device, the data will be freed if ref counter reaches 0. Am I missing something? > Yes, get_device() is to avoid the device-related data disappearing, __free(put_cxl_root) will help to release the reference of cxl_root->port.dev when cxl_get_hpa_freespace() finished, so that you don't need a put_device(&root_port->dev) in the function. I think that your case is similar to this patch https://lore.kernel.org/all/170449247353.3779673.5963704495491343135.stgit@djiang5-mobl3/ > >>> + if (!root) { >>> + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); >>> + return ERR_PTR(-ENXIO); >>> + } >>> + >>> + root_port = &root->port; >>> + down_read(&cxl_region_rwsem); >>> + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); >>> + up_read(&cxl_region_rwsem); >>> + put_device(&root_port->dev); >>> + >>> + if (!ctx.cxlrd) >>> + return ERR_PTR(-ENOMEM); >>> + >>> + *max = ctx.max_hpa; >>> + return ctx.cxlrd; >>> +} >>> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); >>> + >>> + >>> static ssize_t size_store(struct device *dev, struct device_attribute *attr, >>> const char *buf, size_t len) >>> { >>> diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h >>> index 9973430d975f..d3fdd2c1e066 100644 >>> --- a/drivers/cxl/cxl.h >>> +++ b/drivers/cxl/cxl.h >>> @@ -770,6 +770,9 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev); >>> struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); >>> struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); >>> struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); >>> + >>> +#define CXLRD_DEV(cxlrd) &cxlrd->cxlsd.cxld.dev >>> + >>> bool is_root_decoder(struct device *dev); >>> bool is_switch_decoder(struct device *dev); >>> bool is_endpoint_decoder(struct device *dev); >>> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h >>> index 8f2a820bd92d..a0e0795ec064 100644 >>> --- a/drivers/cxl/cxlmem.h >>> +++ b/drivers/cxl/cxlmem.h >>> @@ -877,4 +877,9 @@ struct cxl_hdm { >>> struct seq_file; >>> struct dentry *cxl_debugfs_create_dir(const char *dir); >>> void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds); >>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >>> + int interleave_ways, >>> + unsigned long flags, >>> + resource_size_t *max); >>> + >>> #endif /* __CXL_MEM_H__ */ >>> diff --git 
a/drivers/net/ethernet/sfc/efx_cxl.c b/drivers/net/ethernet/sfc/efx_cxl.c >>> index 2cf4837ddfc1..6d49571ccff7 100644 >>> --- a/drivers/net/ethernet/sfc/efx_cxl.c >>> +++ b/drivers/net/ethernet/sfc/efx_cxl.c >>> @@ -22,6 +22,7 @@ void efx_cxl_init(struct efx_nic *efx) >>> { >>> struct pci_dev *pci_dev = efx->pci_dev; >>> struct efx_cxl *cxl = efx->cxl; >>> + resource_size_t max = 0; >>> struct resource res; >>> u16 dvsec; >>> @@ -74,6 +75,19 @@ void efx_cxl_init(struct efx_nic *efx) >>> if (IS_ERR(cxl->endpoint)) >>> pci_info(pci_dev, "CXL accel acquire endpoint failed"); >>> + cxl->cxlrd = cxl_get_hpa_freespace(cxl->endpoint, 1, >>> + CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2, >>> + &max); >>> + >>> + if (IS_ERR(cxl->cxlrd)) { >>> + pci_info(pci_dev, "CXL accel get HPA failed"); >>> + goto out; >>> + } >>> + >>> + if (max < EFX_CTPIO_BUFFER_SIZE) >>> + pci_info(pci_dev, "CXL accel not enough free HPA space %llu < %u\n", >>> + max, EFX_CTPIO_BUFFER_SIZE); >>> +out: >>> cxl_release_endpoint(cxl->cxlmd, cxl->endpoint); >>> } >>> diff --git a/include/linux/cxl_accel_mem.h b/include/linux/cxl_accel_mem.h >>> index 701910021df8..f3e77688ffe0 100644 >>> --- a/include/linux/cxl_accel_mem.h >>> +++ b/include/linux/cxl_accel_mem.h >>> @@ -6,6 +6,10 @@ >>> #ifndef __CXL_ACCEL_MEM_H >>> #define __CXL_ACCEL_MEM_H >>> +#define CXL_DECODER_F_RAM BIT(0) >>> +#define CXL_DECODER_F_PMEM BIT(1) >>> +#define CXL_DECODER_F_TYPE2 BIT(2) >>> + >>> enum accel_resource{ >>> CXL_ACCEL_RES_DPA, >>> CXL_ACCEL_RES_RAM, >>> @@ -32,4 +36,9 @@ struct cxl_memdev *devm_cxl_add_memdev(struct device *host, >>> struct cxl_port *cxl_acquire_endpoint(struct cxl_memdev *cxlmd); >>> void cxl_release_endpoint(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); >>> + >>> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >>> + int interleave_ways, >>> + unsigned long flags, >>> + resource_size_t *max); >>> #endif >> >
On 7/25/24 06:51, Li, Ming4 wrote:
> On 7/24/2024 4:24 PM, Alejandro Lucero Palau wrote:
>> On 7/16/24 07:06, Li, Ming4 wrote:
>>> On 7/16/2024 1:28 AM, alejandro.lucero-palau@amd.com wrote:
>>>> From: Alejandro Lucero <alucerop@amd.com>
>>>>
>>> Could use scope-based resource management __free() here to drop below put_device(&root_port->dev);
>>>
>>> e.g. struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(endpoint);
>>>
>> I need to admit not familiar yet with scope-based macros, but I think these are different things. The scope of the pointer is inside this function, but the data referenced is likely to persist.
>>
>> get_device, inside find_cxl_root, is needed to avoid the device-related data disappearing while referenced by the code inside this function, and at the time of put_device, the data will be freed if ref counter reaches 0. Am I missing something?
>>
> Yes, get_device() is to avoid the device-related data disappearing, __free(put_cxl_root) will help to release the reference of cxl_root->port.dev when cxl_get_hpa_freespace() finished, so that you don't need a put_device(&root_port->dev) in the function.
>
> I think that your case is similar to this patch
>
> https://lore.kernel.org/all/170449247353.3779673.5963704495491343135.stgit@djiang5-mobl3/

OK, that makes sense. I was blinded by assuming it was just about freeing memory, but the cleanup function invoked at scope exit can do other things as well. I will use it in the next version.

Thanks
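For reference, a rough sketch of how cxl_get_hpa_freespace() could look with the suggested scope-based cleanup. It assumes the put_cxl_root() DEFINE_FREE() helper already present in drivers/cxl (the one the linked patch builds on) and __free() from <linux/cleanup.h>; it only illustrates the shape of the change, not the next revision itself:

	struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint,
						       int interleave_ways,
						       unsigned long flags,
						       resource_size_t *max)
	{
		struct cxlrd_max_context ctx = {
			.host_bridges = &endpoint->host_bridge,
			.interleave_ways = interleave_ways,
			.flags = flags,
		};

		if (!is_cxl_endpoint(endpoint)) {
			dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n");
			return ERR_PTR(-EINVAL);
		}

		/* reference taken by find_cxl_root() is dropped automatically on return */
		struct cxl_root *root __free(put_cxl_root) = find_cxl_root(endpoint);
		if (!root) {
			dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n");
			return ERR_PTR(-ENXIO);
		}

		down_read(&cxl_region_rwsem);
		device_for_each_child(&root->port.dev, &ctx, find_max_hpa);
		up_read(&cxl_region_rwsem);
		/* no explicit put_device(&root->port.dev) needed anymore */

		if (!ctx.cxlrd)
			return ERR_PTR(-ENOMEM);

		*max = ctx.max_hpa;
		return ctx.cxlrd;
	}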
On Mon, 15 Jul 2024 18:28:29 +0100 alejandro.lucero-palau@amd.com wrote: > From: Alejandro Lucero <alucerop@amd.com> > > CXL region creation involves allocating capacity from device DPA > (device-physical-address space) and assigning it to decode a given HPA > (host-physical-address space). Before determining how much DPA to > allocate the amount of available HPA must be determined. Also, not all > HPA is create equal, some specifically targets RAM, some target PMEM, > some is prepared for device-memory flows like HDM-D and HDM-DB, and some > is host-only (HDM-H). > > Wrap all of those concerns into an API that retrieves a root decoder > (platform CXL window) that fits the specified constraints and the > capacity available for a new region. > > Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f > > Signed-off-by: Alejandro Lucero <alucerop@amd.com> > Co-developed-by: Dan Williams <dan.j.williams@intel.com> Hi. This seems a lot more complex than an accelerator would need. If plan is to use this in the type3 driver as well, I'd like to see that done as a precursor to the main series. If it only matters to accelerator drivers (as in type 3 I think we make this a userspace problem), then limit the code to handle interleave ways == 1 only. Maybe we will care about higher interleave in the long run, but do you have a multihead accelerator today? Jonathan > --- > drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++ > drivers/cxl/cxl.h | 3 + > drivers/cxl/cxlmem.h | 5 + > drivers/net/ethernet/sfc/efx_cxl.c | 14 +++ > include/linux/cxl_accel_mem.h | 9 ++ > 5 files changed, 192 insertions(+) > > diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c > index 538ebd5a64fd..ca464bfef77b 100644 > --- a/drivers/cxl/core/region.c > +++ b/drivers/cxl/core/region.c > @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) > return 0; > } > > + > +struct cxlrd_max_context { > + struct device * const *host_bridges; > + int interleave_ways; > + unsigned long flags; > + resource_size_t max_hpa; > + struct cxl_root_decoder *cxlrd; > +}; > + > +static int find_max_hpa(struct device *dev, void *data) > +{ > + struct cxlrd_max_context *ctx = data; > + struct cxl_switch_decoder *cxlsd; > + struct cxl_root_decoder *cxlrd; > + struct resource *res, *prev; > + struct cxl_decoder *cxld; > + resource_size_t max; > + int found; > + > + if (!is_root_decoder(dev)) > + return 0; > + > + cxlrd = to_cxl_root_decoder(dev); > + cxld = &cxlrd->cxlsd.cxld; > + if ((cxld->flags & ctx->flags) != ctx->flags) { > + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", > + cxld->flags, ctx->flags); > + return 0; > + } > + > + /* A Host bridge could have more interleave ways than an > + * endpoint, couldn´t it? EP interleave ways is about working out how the full HPA address (it's all sent over the wire) is modified to get to the DPA. So it needs to know what the overall interleave is. Host bridge can't interleave and then have the EP not know about it. If there are switch HDM decoders in the path, the host bridge interleave may be less than that the EP needs to deal with. Does an accelerator actually cope with interleave? Is aim here to ensure that IW is never anything other than 1? Or is this meant to have more general use? I guess it is meant to. In which case, I'd like to see this used in the type3 driver as well. 
> + * > + * What does interleave ways mean here in terms of the requestor? > + * Why the FFMWS has 0 interleave ways but root port has 1? FFMWS? > + */ > + if (cxld->interleave_ways != ctx->interleave_ways) { > + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n"); > + return 0; > + } > + > + cxlsd = &cxlrd->cxlsd; > + > + guard(rwsem_read)(&cxl_region_rwsem); > + found = 0; > + for (int i = 0; i < ctx->interleave_ways; i++) > + for (int j = 0; j < ctx->interleave_ways; j++) > + if (ctx->host_bridges[i] == > + cxlsd->target[j]->dport_dev) { > + found++; > + break; > + } > + > + if (found != ctx->interleave_ways) { > + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n"); > + return 0; > + } > + > + /* > + * Walk the root decoder resource range relying on cxl_region_rwsem to > + * preclude sibling arrival/departure and find the largest free space > + * gap. > + */ > + lockdep_assert_held_read(&cxl_region_rwsem); > + max = 0; > + res = cxlrd->res->child; > + if (!res) > + max = resource_size(cxlrd->res); > + else > + max = 0; > + > + for (prev = NULL; res; prev = res, res = res->sibling) { > + struct resource *next = res->sibling; > + resource_size_t free = 0; > + > + if (!prev && res->start > cxlrd->res->start) { > + free = res->start - cxlrd->res->start; > + max = max(free, max); > + } > + if (prev && res->start > prev->end + 1) { > + free = res->start - prev->end + 1; > + max = max(free, max); > + } > + if (next && res->end + 1 < next->start) { > + free = next->start - res->end + 1; > + max = max(free, max); > + } > + if (!next && res->end + 1 < cxlrd->res->end + 1) { > + free = cxlrd->res->end + 1 - res->end + 1; > + max = max(free, max); > + } > + } > + > + if (max > ctx->max_hpa) { > + if (ctx->cxlrd) > + put_device(CXLRD_DEV(ctx->cxlrd)); > + get_device(CXLRD_DEV(cxlrd)); > + ctx->cxlrd = cxlrd; > + ctx->max_hpa = max; > + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max); dev_dbg() > + } > + return 0; > +} > + > +/** > + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints > + * @endpoint: an endpoint that is mapped by the returned decoder > + * @interleave_ways: number of entries in @host_bridges > + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] > + * @max: output parameter of bytes available in the returned decoder @available_size or something along those lines. I'd expect max to be the end address of the available region > + * > + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' > + * is a point in time snapshot. If by the time the caller goes to use this root > + * decoder's capacity the capacity is reduced then caller needs to loop and > + * retry. > + * > + * The returned root decoder has an elevated reference count that needs to be > + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with > + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder > + * does not race. 
> + */ > +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, > + int interleave_ways, > + unsigned long flags, > + resource_size_t *max) > +{ > + > + struct cxlrd_max_context ctx = { > + .host_bridges = &endpoint->host_bridge, > + .interleave_ways = interleave_ways, > + .flags = flags, > + }; > + struct cxl_port *root_port; > + struct cxl_root *root; > + > + if (!is_cxl_endpoint(endpoint)) { > + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); > + return ERR_PTR(-EINVAL); > + } > + > + root = find_cxl_root(endpoint); > + if (!root) { > + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); > + return ERR_PTR(-ENXIO); > + } > + > + root_port = &root->port; > + down_read(&cxl_region_rwsem); > + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); > + up_read(&cxl_region_rwsem); > + put_device(&root_port->dev); > + > + if (!ctx.cxlrd) > + return ERR_PTR(-ENOMEM); > + > + *max = ctx.max_hpa; Rename max_hpa to available_hpa. > + return ctx.cxlrd; > +} > +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); > + > +
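To make the snapshot semantics described in the kernel-doc concrete, a hypothetical accelerator caller would follow roughly this pattern; needed and efx_try_create_region() are made-up placeholders for whatever region setup the driver does next, not names from the patch:

	struct cxl_root_decoder *cxlrd;
	resource_size_t avail, needed = EFX_CTPIO_BUFFER_SIZE;
	int rc;

	do {
		cxlrd = cxl_get_hpa_freespace(endpoint, 1,
					      CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2,
					      &avail);
		if (IS_ERR(cxlrd))
			return PTR_ERR(cxlrd);

		if (avail < needed) {
			put_device(CXLRD_DEV(cxlrd));
			return -ENOSPC;
		}

		/* hypothetical region setup; if another consumer grabbed the HPA
		 * after the snapshot above, drop the reference and retry with a
		 * fresh snapshot
		 */
		rc = efx_try_create_region(cxlrd, needed);
		put_device(CXLRD_DEV(cxlrd));
	} while (rc == -ENOSPC);

	return rc;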
On 8/4/24 18:57, Jonathan Cameron wrote: > On Mon, 15 Jul 2024 18:28:29 +0100 > alejandro.lucero-palau@amd.com wrote: > >> From: Alejandro Lucero <alucerop@amd.com> >> >> CXL region creation involves allocating capacity from device DPA >> (device-physical-address space) and assigning it to decode a given HPA >> (host-physical-address space). Before determining how much DPA to >> allocate the amount of available HPA must be determined. Also, not all >> HPA is create equal, some specifically targets RAM, some target PMEM, >> some is prepared for device-memory flows like HDM-D and HDM-DB, and some >> is host-only (HDM-H). >> >> Wrap all of those concerns into an API that retrieves a root decoder >> (platform CXL window) that fits the specified constraints and the >> capacity available for a new region. >> >> Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f >> >> Signed-off-by: Alejandro Lucero <alucerop@amd.com> >> Co-developed-by: Dan Williams <dan.j.williams@intel.com> > Hi. > > This seems a lot more complex than an accelerator would need. > If plan is to use this in the type3 driver as well, I'd like to > see that done as a precursor to the main series. > If it only matters to accelerator drivers (as in type 3 I think > we make this a userspace problem), then limit the code to handle > interleave ways == 1 only. Maybe we will care about higher interleave > in the long run, but do you have a multihead accelerator today? I would say this is needed for Type3 as well but current support relies on user space requests. I think Type3 support uses the legacy implementation for memory devices where initially the requirements are quite similar, but I think where CXL is going requires less manual intervention or more automatic assisted manual intervention. I'll wait until Dan can comment on this one for sending it as a precursor or as part of the type2 support. Regarding the interleave, I know you are joking ... but who knows what the future will bring. O maybe I'm misunderstanding your comment, because in my view multi-head device and interleave are not directly related. Are they? I think you can have a single head and support interleaving, with multi-head implying different hosts and therefore different HPAs. 
> Jonathan > >> --- >> drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++ >> drivers/cxl/cxl.h | 3 + >> drivers/cxl/cxlmem.h | 5 + >> drivers/net/ethernet/sfc/efx_cxl.c | 14 +++ >> include/linux/cxl_accel_mem.h | 9 ++ >> 5 files changed, 192 insertions(+) >> >> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c >> index 538ebd5a64fd..ca464bfef77b 100644 >> --- a/drivers/cxl/core/region.c >> +++ b/drivers/cxl/core/region.c >> @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) >> return 0; >> } >> >> + >> +struct cxlrd_max_context { >> + struct device * const *host_bridges; >> + int interleave_ways; >> + unsigned long flags; >> + resource_size_t max_hpa; >> + struct cxl_root_decoder *cxlrd; >> +}; >> + >> +static int find_max_hpa(struct device *dev, void *data) >> +{ >> + struct cxlrd_max_context *ctx = data; >> + struct cxl_switch_decoder *cxlsd; >> + struct cxl_root_decoder *cxlrd; >> + struct resource *res, *prev; >> + struct cxl_decoder *cxld; >> + resource_size_t max; >> + int found; >> + >> + if (!is_root_decoder(dev)) >> + return 0; >> + >> + cxlrd = to_cxl_root_decoder(dev); >> + cxld = &cxlrd->cxlsd.cxld; >> + if ((cxld->flags & ctx->flags) != ctx->flags) { >> + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", >> + cxld->flags, ctx->flags); >> + return 0; >> + } >> + >> + /* A Host bridge could have more interleave ways than an >> + * endpoint, couldn´t it? > EP interleave ways is about working out how the full HPA address (it's > all sent over the wire) is modified to get to the DPA. So it needs > to know what the overall interleave is. Host bridge can't interleave > and then have the EP not know about it. If there are switch HDM decoders > in the path, the host bridge interleave may be less than that the EP needs > to deal with. > > Does an accelerator actually cope with interleave? Is aim here to ensure > that IW is never anything other than 1? Or is this meant to have > more general use? I guess it is meant to. In which case, I'd like to > see this used in the type3 driver as well. > >> + * >> + * What does interleave ways mean here in terms of the requestor? >> + * Why the FFMWS has 0 interleave ways but root port has 1? > FFMWS? > >> + */ >> + if (cxld->interleave_ways != ctx->interleave_ways) { >> + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n"); >> + return 0; >> + } >> + >> + cxlsd = &cxlrd->cxlsd; >> + >> + guard(rwsem_read)(&cxl_region_rwsem); >> + found = 0; >> + for (int i = 0; i < ctx->interleave_ways; i++) >> + for (int j = 0; j < ctx->interleave_ways; j++) >> + if (ctx->host_bridges[i] == >> + cxlsd->target[j]->dport_dev) { >> + found++; >> + break; >> + } >> + >> + if (found != ctx->interleave_ways) { >> + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n"); >> + return 0; >> + } >> + >> + /* >> + * Walk the root decoder resource range relying on cxl_region_rwsem to >> + * preclude sibling arrival/departure and find the largest free space >> + * gap. 
>> + */ >> + lockdep_assert_held_read(&cxl_region_rwsem); >> + max = 0; >> + res = cxlrd->res->child; >> + if (!res) >> + max = resource_size(cxlrd->res); >> + else >> + max = 0; >> + >> + for (prev = NULL; res; prev = res, res = res->sibling) { >> + struct resource *next = res->sibling; >> + resource_size_t free = 0; >> + >> + if (!prev && res->start > cxlrd->res->start) { >> + free = res->start - cxlrd->res->start; >> + max = max(free, max); >> + } >> + if (prev && res->start > prev->end + 1) { >> + free = res->start - prev->end + 1; >> + max = max(free, max); >> + } >> + if (next && res->end + 1 < next->start) { >> + free = next->start - res->end + 1; >> + max = max(free, max); >> + } >> + if (!next && res->end + 1 < cxlrd->res->end + 1) { >> + free = cxlrd->res->end + 1 - res->end + 1; >> + max = max(free, max); >> + } >> + } >> + >> + if (max > ctx->max_hpa) { >> + if (ctx->cxlrd) >> + put_device(CXLRD_DEV(ctx->cxlrd)); >> + get_device(CXLRD_DEV(cxlrd)); >> + ctx->cxlrd = cxlrd; >> + ctx->max_hpa = max; >> + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max); > dev_dbg() > >> + } >> + return 0; >> +} >> + >> +/** >> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints >> + * @endpoint: an endpoint that is mapped by the returned decoder >> + * @interleave_ways: number of entries in @host_bridges >> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] >> + * @max: output parameter of bytes available in the returned decoder > @available_size > or something along those lines. I'd expect max to be the end address of the available > region > >> + * >> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' >> + * is a point in time snapshot. If by the time the caller goes to use this root >> + * decoder's capacity the capacity is reduced then caller needs to loop and >> + * retry. >> + * >> + * The returned root decoder has an elevated reference count that needs to be >> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with >> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder >> + * does not race. >> + */ >> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >> + int interleave_ways, >> + unsigned long flags, >> + resource_size_t *max) >> +{ >> + >> + struct cxlrd_max_context ctx = { >> + .host_bridges = &endpoint->host_bridge, >> + .interleave_ways = interleave_ways, >> + .flags = flags, >> + }; >> + struct cxl_port *root_port; >> + struct cxl_root *root; >> + >> + if (!is_cxl_endpoint(endpoint)) { >> + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); >> + return ERR_PTR(-EINVAL); >> + } >> + >> + root = find_cxl_root(endpoint); >> + if (!root) { >> + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); >> + return ERR_PTR(-ENXIO); >> + } >> + >> + root_port = &root->port; >> + down_read(&cxl_region_rwsem); >> + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); >> + up_read(&cxl_region_rwsem); >> + put_device(&root_port->dev); >> + >> + if (!ctx.cxlrd) >> + return ERR_PTR(-ENOMEM); >> + >> + *max = ctx.max_hpa; > Rename max_hpa to available_hpa. > >> + return ctx.cxlrd; >> +} >> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); >> + >> +
On Mon, 19 Aug 2024 15:47:48 +0100 Alejandro Lucero Palau <alucerop@amd.com> wrote: > On 8/4/24 18:57, Jonathan Cameron wrote: > > On Mon, 15 Jul 2024 18:28:29 +0100 > > alejandro.lucero-palau@amd.com wrote: > > > >> From: Alejandro Lucero <alucerop@amd.com> > >> > >> CXL region creation involves allocating capacity from device DPA > >> (device-physical-address space) and assigning it to decode a given HPA > >> (host-physical-address space). Before determining how much DPA to > >> allocate the amount of available HPA must be determined. Also, not all > >> HPA is create equal, some specifically targets RAM, some target PMEM, > >> some is prepared for device-memory flows like HDM-D and HDM-DB, and some > >> is host-only (HDM-H). > >> > >> Wrap all of those concerns into an API that retrieves a root decoder > >> (platform CXL window) that fits the specified constraints and the > >> capacity available for a new region. > >> > >> Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f > >> > >> Signed-off-by: Alejandro Lucero <alucerop@amd.com> > >> Co-developed-by: Dan Williams <dan.j.williams@intel.com> > > Hi. > > > > This seems a lot more complex than an accelerator would need. > > If plan is to use this in the type3 driver as well, I'd like to > > see that done as a precursor to the main series. > > If it only matters to accelerator drivers (as in type 3 I think > > we make this a userspace problem), then limit the code to handle > > interleave ways == 1 only. Maybe we will care about higher interleave > > in the long run, but do you have a multihead accelerator today? > > > I would say this is needed for Type3 as well but current support relies > on user space requests. I think Type3 support uses the legacy > implementation for memory devices where initially the requirements are > quite similar, but I think where CXL is going requires less manual > intervention or more automatic assisted manual intervention. I'll wait > until Dan can comment on this one for sending it as a precursor or as > part of the type2 support. > > > Regarding the interleave, I know you are joking ... but who knows what > the future will bring. O maybe I'm misunderstanding your comment, > because in my view multi-head device and interleave are not directly > related. Are they? I think you can have a single head and support > interleaving, with multi-head implying different hosts and therefore > different HPAs. Nothing says they heads are connected to different hosts. For type 3 version the reason you'd do this is to spread load across multiple root ports. So it's just a bandwidth play and as far as the host is concerned they might as well be separate devices. For accelerators in theory you can do stuff like that but it gets fiddly fast and in theory you might care that they are the same device for reasons beyond RAS etc and might interleave access to device memory across the two heads. Don't think we care today though, so for now I'd just reject any interleaving. Jonathan
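If the conclusion is to reject interleaving for now, one way to encode it (purely illustrative, not part of the posted patch) is an early check in cxl_get_hpa_freespace(), before any root decoder is walked:

	if (interleave_ways != 1) {
		dev_dbg(&endpoint->dev,
			"only interleave_ways == 1 is supported for accelerator HPA requests\n");
		return ERR_PTR(-EOPNOTSUPP);
	}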
On 8/4/24 18:57, Jonathan Cameron wrote: > On Mon, 15 Jul 2024 18:28:29 +0100 > alejandro.lucero-palau@amd.com wrote: > >> From: Alejandro Lucero <alucerop@amd.com> >> >> CXL region creation involves allocating capacity from device DPA >> (device-physical-address space) and assigning it to decode a given HPA >> (host-physical-address space). Before determining how much DPA to >> allocate the amount of available HPA must be determined. Also, not all >> HPA is create equal, some specifically targets RAM, some target PMEM, >> some is prepared for device-memory flows like HDM-D and HDM-DB, and some >> is host-only (HDM-H). >> >> Wrap all of those concerns into an API that retrieves a root decoder >> (platform CXL window) that fits the specified constraints and the >> capacity available for a new region. >> >> Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f >> >> Signed-off-by: Alejandro Lucero <alucerop@amd.com> >> Co-developed-by: Dan Williams <dan.j.williams@intel.com> > Hi. > > This seems a lot more complex than an accelerator would need. > If plan is to use this in the type3 driver as well, I'd like to > see that done as a precursor to the main series. > If it only matters to accelerator drivers (as in type 3 I think > we make this a userspace problem), then limit the code to handle > interleave ways == 1 only. Maybe we will care about higher interleave > in the long run, but do you have a multihead accelerator today? > > Jonathan > >> --- >> drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++ >> drivers/cxl/cxl.h | 3 + >> drivers/cxl/cxlmem.h | 5 + >> drivers/net/ethernet/sfc/efx_cxl.c | 14 +++ >> include/linux/cxl_accel_mem.h | 9 ++ >> 5 files changed, 192 insertions(+) >> >> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c >> index 538ebd5a64fd..ca464bfef77b 100644 >> --- a/drivers/cxl/core/region.c >> +++ b/drivers/cxl/core/region.c >> @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) >> return 0; >> } >> >> + >> +struct cxlrd_max_context { >> + struct device * const *host_bridges; >> + int interleave_ways; >> + unsigned long flags; >> + resource_size_t max_hpa; >> + struct cxl_root_decoder *cxlrd; >> +}; >> + >> +static int find_max_hpa(struct device *dev, void *data) >> +{ >> + struct cxlrd_max_context *ctx = data; >> + struct cxl_switch_decoder *cxlsd; >> + struct cxl_root_decoder *cxlrd; >> + struct resource *res, *prev; >> + struct cxl_decoder *cxld; >> + resource_size_t max; >> + int found; >> + >> + if (!is_root_decoder(dev)) >> + return 0; >> + >> + cxlrd = to_cxl_root_decoder(dev); >> + cxld = &cxlrd->cxlsd.cxld; >> + if ((cxld->flags & ctx->flags) != ctx->flags) { >> + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", >> + cxld->flags, ctx->flags); >> + return 0; >> + } >> + >> + /* A Host bridge could have more interleave ways than an >> + * endpoint, couldn´t it? > EP interleave ways is about working out how the full HPA address (it's > all sent over the wire) is modified to get to the DPA. So it needs > to know what the overall interleave is. Host bridge can't interleave > and then have the EP not know about it. If there are switch HDM decoders > in the path, the host bridge interleave may be less than that the EP needs > to deal with. > > Does an accelerator actually cope with interleave? Is aim here to ensure > that IW is never anything other than 1? Or is this meant to have > more general use? 
> I guess it is meant to. In which case, I'd like to > see this used in the type3 driver as well. I guess an accelerator could cope with interleave ways > 1, but not ours. And it does not make sense to me for an accelerator to be an EP for an interleaved HPA, because the memory does not make sense outside the accelerator. So if the CFMW and the Host Bridge have an interleave way of 2, implying accesses to the HPA through different wires, I assume an accelerator should not be allowed. >> + * >> + * What does interleave ways mean here in terms of the requestor? >> + * Why the FFMWS has 0 interleave ways but root port has 1? > FFMWS? I meant CFMW, and I think this comment is there because I found the CFMW is parsed with interleave ways = 0 while the root port has 1, which is confusing. > >> + */ >> + if (cxld->interleave_ways != ctx->interleave_ways) { >> + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n"); >> + return 0; >> + } >> + >> + cxlsd = &cxlrd->cxlsd; >> + >> + guard(rwsem_read)(&cxl_region_rwsem); >> + found = 0; >> + for (int i = 0; i < ctx->interleave_ways; i++) >> + for (int j = 0; j < ctx->interleave_ways; j++) >> + if (ctx->host_bridges[i] == >> + cxlsd->target[j]->dport_dev) { >> + found++; >> + break; >> + } >> + >> + if (found != ctx->interleave_ways) { >> + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n"); >> + return 0; >> + } >> + >> + /* >> + * Walk the root decoder resource range relying on cxl_region_rwsem to >> + * preclude sibling arrival/departure and find the largest free space >> + * gap. >> + */ >> + lockdep_assert_held_read(&cxl_region_rwsem); >> + max = 0; >> + res = cxlrd->res->child; >> + if (!res) >> + max = resource_size(cxlrd->res); >> + else >> + max = 0; >> + >> + for (prev = NULL; res; prev = res, res = res->sibling) { >> + struct resource *next = res->sibling; >> + resource_size_t free = 0; >> + >> + if (!prev && res->start > cxlrd->res->start) { >> + free = res->start - cxlrd->res->start; >> + max = max(free, max); >> + } >> + if (prev && res->start > prev->end + 1) { >> + free = res->start - prev->end + 1; >> + max = max(free, max); >> + } >> + if (next && res->end + 1 < next->start) { >> + free = next->start - res->end + 1; >> + max = max(free, max); >> + } >> + if (!next && res->end + 1 < cxlrd->res->end + 1) { >> + free = cxlrd->res->end + 1 - res->end + 1; >> + max = max(free, max); >> + } >> + } >> + >> + if (max > ctx->max_hpa) { >> + if (ctx->cxlrd) >> + put_device(CXLRD_DEV(ctx->cxlrd)); >> + get_device(CXLRD_DEV(cxlrd)); >> + ctx->cxlrd = cxlrd; >> + ctx->max_hpa = max; >> + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max); > dev_dbg() > >> + } >> + return 0; >> +} >> + >> +/** >> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints >> + * @endpoint: an endpoint that is mapped by the returned decoder >> + * @interleave_ways: number of entries in @host_bridges >> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] >> + * @max: output parameter of bytes available in the returned decoder > @available_size > or something along those lines. I'd expect max to be the end address of the available > region > >> + * >> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' >> + * is a point in time snapshot. If by the time the caller goes to use this root >> + * decoder's capacity the capacity is reduced then caller needs to loop and >> + * retry.
>> + * >> + * The returned root decoder has an elevated reference count that needs to be >> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with >> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder >> + * does not race. >> + */ >> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >> + int interleave_ways, >> + unsigned long flags, >> + resource_size_t *max) >> +{ >> + >> + struct cxlrd_max_context ctx = { >> + .host_bridges = &endpoint->host_bridge, >> + .interleave_ways = interleave_ways, >> + .flags = flags, >> + }; >> + struct cxl_port *root_port; >> + struct cxl_root *root; >> + >> + if (!is_cxl_endpoint(endpoint)) { >> + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); >> + return ERR_PTR(-EINVAL); >> + } >> + >> + root = find_cxl_root(endpoint); >> + if (!root) { >> + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); >> + return ERR_PTR(-ENXIO); >> + } >> + >> + root_port = &root->port; >> + down_read(&cxl_region_rwsem); >> + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); >> + up_read(&cxl_region_rwsem); >> + put_device(&root_port->dev); >> + >> + if (!ctx.cxlrd) >> + return ERR_PTR(-ENOMEM); >> + >> + *max = ctx.max_hpa; > Rename max_hpa to available_hpa. > >> + return ctx.cxlrd; >> +} >> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); >> + >> +
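One nit the review does not call out: the hole arithmetic in the walk quoted above looks off by two in three of its four branches (the hole between prev and res spans res->start - prev->end - 1 bytes, not res->start - prev->end + 1), and the prev/next branches measure the same inter-sibling gap twice. A corrected sketch of the same walk, assuming cxlrd->res->child is a sorted, non-overlapping sibling list, could be:

	/* Largest free gap inside the root decoder window (sketch). */
	max = 0;
	if (!cxlrd->res->child)
		max = resource_size(cxlrd->res);

	for (prev = NULL, res = cxlrd->res->child; res;
	     prev = res, res = res->sibling) {
		resource_size_t free;

		/* Hole between the window start and the first child. */
		if (!prev && res->start > cxlrd->res->start) {
			free = res->start - cxlrd->res->start;
			max = max(free, max);
		}
		/* Hole between the previous child and this one. */
		if (prev && res->start > prev->end + 1) {
			free = res->start - prev->end - 1;
			max = max(free, max);
		}
		/* Hole between the last child and the window end. */
		if (!res->sibling && res->end < cxlrd->res->end) {
			free = cxlrd->res->end - res->end;
			max = max(free, max);
		}
	}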
On 8/4/24 18:57, Jonathan Cameron wrote: > + } >> + return 0; >> +} >> + >> +/** >> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints >> + * @endpoint: an endpoint that is mapped by the returned decoder >> + * @interleave_ways: number of entries in @host_bridges >> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] >> + * @max: output parameter of bytes available in the returned decoder > @available_size > or something along those lines. I'd expect max to be the end address of the available > region No really. The code looks for the biggest free hole in the HPA. Returning available size does not help except from informing about the "internal fragmentation". >> + * >> + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' >> + * is a point in time snapshot. If by the time the caller goes to use this root >> + * decoder's capacity the capacity is reduced then caller needs to loop and >> + * retry. >> + * >> + * The returned root decoder has an elevated reference count that needs to be >> + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with >> + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder >> + * does not race. >> + */ >> +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, >> + int interleave_ways, >> + unsigned long flags, >> + resource_size_t *max) >> +{ >> + >> + struct cxlrd_max_context ctx = { >> + .host_bridges = &endpoint->host_bridge, >> + .interleave_ways = interleave_ways, >> + .flags = flags, >> + }; >> + struct cxl_port *root_port; >> + struct cxl_root *root; >> + >> + if (!is_cxl_endpoint(endpoint)) { >> + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); >> + return ERR_PTR(-EINVAL); >> + } >> + >> + root = find_cxl_root(endpoint); >> + if (!root) { >> + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); >> + return ERR_PTR(-ENXIO); >> + } >> + >> + root_port = &root->port; >> + down_read(&cxl_region_rwsem); >> + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); >> + up_read(&cxl_region_rwsem); >> + put_device(&root_port->dev); >> + >> + if (!ctx.cxlrd) >> + return ERR_PTR(-ENOMEM); >> + >> + *max = ctx.max_hpa; > Rename max_hpa to available_hpa. > >> + return ctx.cxlrd; >> +} >> +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); >> + >> +
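A worked example of the "biggest free hole" point, with made-up numbers: take a 1 GiB root decoder window that already holds two 256 MiB regions, one at the start of the window and one right after its midpoint.

	window: | region A 256M | hole 256M | region B 256M | hole 256M |
	total free capacity     = 512M
	largest contiguous hole = 256M

A caller that needs a single 300 MiB allocation has to give up even though 512 MiB is nominally free, which is exactly the internal-fragmentation distinction being made here; hence returning the largest hole rather than a sum of free space or an end address.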
On Wed, 28 Aug 2024 11:18:12 +0100 Alejandro Lucero Palau <alucerop@amd.com> wrote: > On 8/4/24 18:57, Jonathan Cameron wrote: > > On Mon, 15 Jul 2024 18:28:29 +0100 > > alejandro.lucero-palau@amd.com wrote: > > > >> From: Alejandro Lucero <alucerop@amd.com> > >> > >> CXL region creation involves allocating capacity from device DPA > >> (device-physical-address space) and assigning it to decode a given HPA > >> (host-physical-address space). Before determining how much DPA to > >> allocate the amount of available HPA must be determined. Also, not all > >> HPA is create equal, some specifically targets RAM, some target PMEM, > >> some is prepared for device-memory flows like HDM-D and HDM-DB, and some > >> is host-only (HDM-H). > >> > >> Wrap all of those concerns into an API that retrieves a root decoder > >> (platform CXL window) that fits the specified constraints and the > >> capacity available for a new region. > >> > >> Based on https://lore.kernel.org/linux-cxl/168592149709.1948938.8663425987110396027.stgit@dwillia2-xfh.jf.intel.com/T/#m6fbe775541da3cd477d65fa95c8acdc347345b4f > >> > >> Signed-off-by: Alejandro Lucero <alucerop@amd.com> > >> Co-developed-by: Dan Williams <dan.j.williams@intel.com> > > Hi. > > > > This seems a lot more complex than an accelerator would need. > > If plan is to use this in the type3 driver as well, I'd like to > > see that done as a precursor to the main series. > > If it only matters to accelerator drivers (as in type 3 I think > > we make this a userspace problem), then limit the code to handle > > interleave ways == 1 only. Maybe we will care about higher interleave > > in the long run, but do you have a multihead accelerator today? > > > > Jonathan > > > >> --- > >> drivers/cxl/core/region.c | 161 +++++++++++++++++++++++++++++ > >> drivers/cxl/cxl.h | 3 + > >> drivers/cxl/cxlmem.h | 5 + > >> drivers/net/ethernet/sfc/efx_cxl.c | 14 +++ > >> include/linux/cxl_accel_mem.h | 9 ++ > >> 5 files changed, 192 insertions(+) > >> > >> diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c > >> index 538ebd5a64fd..ca464bfef77b 100644 > >> --- a/drivers/cxl/core/region.c > >> +++ b/drivers/cxl/core/region.c > >> @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) > >> return 0; > >> } > >> > >> + > >> +struct cxlrd_max_context { > >> + struct device * const *host_bridges; > >> + int interleave_ways; > >> + unsigned long flags; > >> + resource_size_t max_hpa; > >> + struct cxl_root_decoder *cxlrd; > >> +}; > >> + > >> +static int find_max_hpa(struct device *dev, void *data) > >> +{ > >> + struct cxlrd_max_context *ctx = data; > >> + struct cxl_switch_decoder *cxlsd; > >> + struct cxl_root_decoder *cxlrd; > >> + struct resource *res, *prev; > >> + struct cxl_decoder *cxld; > >> + resource_size_t max; > >> + int found; > >> + > >> + if (!is_root_decoder(dev)) > >> + return 0; > >> + > >> + cxlrd = to_cxl_root_decoder(dev); > >> + cxld = &cxlrd->cxlsd.cxld; > >> + if ((cxld->flags & ctx->flags) != ctx->flags) { > >> + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", > >> + cxld->flags, ctx->flags); > >> + return 0; > >> + } > >> + > >> + /* A Host bridge could have more interleave ways than an > >> + * endpoint, couldn´t it? > > EP interleave ways is about working out how the full HPA address (it's > > all sent over the wire) is modified to get to the DPA. So it needs > > to know what the overall interleave is. Host bridge can't interleave > > and then have the EP not know about it. 
> > If there are switch HDM decoders > > in the path, the host bridge interleave may be less than that the EP needs > > to deal with. > > > > Does an accelerator actually cope with interleave? Is aim here to ensure > > that IW is never anything other than 1? Or is this meant to have > > more general use? I guess it is meant to. In which case, I'd like to > > see this used in the type3 driver as well. > > > I guess an accelerator could cope with interleave ways > 1, but not ours. > > And it does not make sense to me for an accelerator to be an EP for an > interleaved HPA, because the memory does not make sense outside the > accelerator. > > So if the CFMW and the Host Bridge have an interleave way of 2, implying > accesses to the HPA through different wires, I assume an accelerator > should not be allowed. That's certainly fine for now. Maybe something will come along that can make use of interleaving (I'm thinking of a processing-near-memory type of setup that offloads minor work closer to the memory but is basically type 3 memory). > > > >> + * > >> + * What does interleave ways mean here in terms of the requestor? > >> + * Why the FFMWS has 0 interleave ways but root port has 1? > > FFMWS? > > > I meant CFMW, and I think this comment is there because I found the CFMW > is parsed with interleave ways = 0 while the root port has 1, which is > confusing. > I'm a bit lost. Maybe this is just the difference between the encoded and the 'real' values? A 1-way interleave is just no interleaving. Jonathan
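For readers puzzled by the "encoded vs real" remark: the CFMWS and HDM decoder registers carry an encoded interleave-ways field in which 0 means 1 way (i.e. no interleaving), so a raw value of 0 on the window and a decoded value of 1 on the port would describe the same configuration. A sketch of that decode per the CXL encoding; the helper name below is made up for illustration and is not an existing kernel API.

/* Illustrative decode of the encoded interleave-ways (eiw) field. */
static int example_eiw_to_ways(u8 eiw, unsigned int *ways)
{
	switch (eiw) {
	case 0 ... 4:
		*ways = 1 << eiw;	/* 0->1, 1->2, 2->4, 3->8, 4->16 */
		return 0;
	case 8 ... 10:
		*ways = 3 << (eiw - 8);	/* 8->3, 9->6, 10->12 */
		return 0;
	default:
		return -EINVAL;
	}
}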
On Wed, 28 Aug 2024 11:41:11 +0100 Alejandro Lucero Palau <alucerop@amd.com> wrote: > On 8/4/24 18:57, Jonathan Cameron wrote: > > + } > >> + return 0; > >> +} > >> + > >> +/** > >> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints > >> + * @endpoint: an endpoint that is mapped by the returned decoder > >> + * @interleave_ways: number of entries in @host_bridges > >> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] > >> + * @max: output parameter of bytes available in the returned decoder > > @available_size > > or something along those lines. I'd expect max to be the end address of the available > > region > > > No really. The code looks for the biggest free hole in the HPA. > Returning available size does not help except from informing about the > "internal fragmentation". I worded that badly. Intent was that to me 'max' == maximum address, not maximum available contiguous range. max_hole or max_avail_contig maybe? >
On 8/28/24 12:26, Jonathan Cameron wrote: > On Wed, 28 Aug 2024 11:41:11 +0100 > Alejandro Lucero Palau <alucerop@amd.com> wrote: > >> On 8/4/24 18:57, Jonathan Cameron wrote: >>> + } >>>> + return 0; >>>> +} >>>> + >>>> +/** >>>> + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints >>>> + * @endpoint: an endpoint that is mapped by the returned decoder >>>> + * @interleave_ways: number of entries in @host_bridges >>>> + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] >>>> + * @max: output parameter of bytes available in the returned decoder >>> @available_size >>> or something along those lines. I'd expect max to be the end address of the available >>> region >> >> No really. The code looks for the biggest free hole in the HPA. >> Returning available size does not help except from informing about the >> "internal fragmentation". > I worded that badly. Intent was that to me 'max' == maximum address, not maximum available > contiguous range. max_hole or max_avail_contig maybe? > Let's go with max_avail_contig. Thanks!
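If the agreed rename lands in a later revision, the prototype would presumably read as below; the posted v2 patch that follows still uses the old parameter name, so this is only a sketch of the outcome of the discussion.

struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint,
					       int interleave_ways,
					       unsigned long flags,
					       resource_size_t *max_avail_contig);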
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 538ebd5a64fd..ca464bfef77b 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -702,6 +702,167 @@ static int free_hpa(struct cxl_region *cxlr) return 0; } + +struct cxlrd_max_context { + struct device * const *host_bridges; + int interleave_ways; + unsigned long flags; + resource_size_t max_hpa; + struct cxl_root_decoder *cxlrd; +}; + +static int find_max_hpa(struct device *dev, void *data) +{ + struct cxlrd_max_context *ctx = data; + struct cxl_switch_decoder *cxlsd; + struct cxl_root_decoder *cxlrd; + struct resource *res, *prev; + struct cxl_decoder *cxld; + resource_size_t max; + int found; + + if (!is_root_decoder(dev)) + return 0; + + cxlrd = to_cxl_root_decoder(dev); + cxld = &cxlrd->cxlsd.cxld; + if ((cxld->flags & ctx->flags) != ctx->flags) { + dev_dbg(dev, "find_max_hpa, flags not matching: %08lx vs %08lx\n", + cxld->flags, ctx->flags); + return 0; + } + + /* A Host bridge could have more interleave ways than an + * endpoint, couldn´t it? + * + * What does interleave ways mean here in terms of the requestor? + * Why the FFMWS has 0 interleave ways but root port has 1? + */ + if (cxld->interleave_ways != ctx->interleave_ways) { + dev_dbg(dev, "find_max_hpa, interleave_ways not matching\n"); + return 0; + } + + cxlsd = &cxlrd->cxlsd; + + guard(rwsem_read)(&cxl_region_rwsem); + found = 0; + for (int i = 0; i < ctx->interleave_ways; i++) + for (int j = 0; j < ctx->interleave_ways; j++) + if (ctx->host_bridges[i] == + cxlsd->target[j]->dport_dev) { + found++; + break; + } + + if (found != ctx->interleave_ways) { + dev_dbg(dev, "find_max_hpa, no interleave_ways found\n"); + return 0; + } + + /* + * Walk the root decoder resource range relying on cxl_region_rwsem to + * preclude sibling arrival/departure and find the largest free space + * gap. + */ + lockdep_assert_held_read(&cxl_region_rwsem); + max = 0; + res = cxlrd->res->child; + if (!res) + max = resource_size(cxlrd->res); + else + max = 0; + + for (prev = NULL; res; prev = res, res = res->sibling) { + struct resource *next = res->sibling; + resource_size_t free = 0; + + if (!prev && res->start > cxlrd->res->start) { + free = res->start - cxlrd->res->start; + max = max(free, max); + } + if (prev && res->start > prev->end + 1) { + free = res->start - prev->end + 1; + max = max(free, max); + } + if (next && res->end + 1 < next->start) { + free = next->start - res->end + 1; + max = max(free, max); + } + if (!next && res->end + 1 < cxlrd->res->end + 1) { + free = cxlrd->res->end + 1 - res->end + 1; + max = max(free, max); + } + } + + if (max > ctx->max_hpa) { + if (ctx->cxlrd) + put_device(CXLRD_DEV(ctx->cxlrd)); + get_device(CXLRD_DEV(cxlrd)); + ctx->cxlrd = cxlrd; + ctx->max_hpa = max; + dev_info(CXLRD_DEV(cxlrd), "found %pa bytes of free space\n", &max); + } + return 0; +} + +/** + * cxl_get_hpa_freespace - find a root decoder with free capacity per constraints + * @endpoint: an endpoint that is mapped by the returned decoder + * @interleave_ways: number of entries in @host_bridges + * @flags: CXL_DECODER_F flags for selecting RAM vs PMEM, and HDM-H vs HDM-D[B] + * @max: output parameter of bytes available in the returned decoder + * + * The return tuple of a 'struct cxl_root_decoder' and 'bytes available (@max)' + * is a point in time snapshot. If by the time the caller goes to use this root + * decoder's capacity the capacity is reduced then caller needs to loop and + * retry. 
+ * + * The returned root decoder has an elevated reference count that needs to be + * put with put_device(cxlrd_dev(cxlrd)). Locking context is with + * cxl_{acquire,release}_endpoint(), that ensures removal of the root decoder + * does not race. + */ +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, + int interleave_ways, + unsigned long flags, + resource_size_t *max) +{ + + struct cxlrd_max_context ctx = { + .host_bridges = &endpoint->host_bridge, + .interleave_ways = interleave_ways, + .flags = flags, + }; + struct cxl_port *root_port; + struct cxl_root *root; + + if (!is_cxl_endpoint(endpoint)) { + dev_dbg(&endpoint->dev, "hpa requestor is not an endpoint\n"); + return ERR_PTR(-EINVAL); + } + + root = find_cxl_root(endpoint); + if (!root) { + dev_dbg(&endpoint->dev, "endpoint can not be related to a root port\n"); + return ERR_PTR(-ENXIO); + } + + root_port = &root->port; + down_read(&cxl_region_rwsem); + device_for_each_child(&root_port->dev, &ctx, find_max_hpa); + up_read(&cxl_region_rwsem); + put_device(&root_port->dev); + + if (!ctx.cxlrd) + return ERR_PTR(-ENOMEM); + + *max = ctx.max_hpa; + return ctx.cxlrd; +} +EXPORT_SYMBOL_NS_GPL(cxl_get_hpa_freespace, CXL); + + static ssize_t size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 9973430d975f..d3fdd2c1e066 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -770,6 +770,9 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev); struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); + +#define CXLRD_DEV(cxlrd) &cxlrd->cxlsd.cxld.dev + bool is_root_decoder(struct device *dev); bool is_switch_decoder(struct device *dev); bool is_endpoint_decoder(struct device *dev); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 8f2a820bd92d..a0e0795ec064 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -877,4 +877,9 @@ struct cxl_hdm { struct seq_file; struct dentry *cxl_debugfs_create_dir(const char *dir); void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds); +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, + int interleave_ways, + unsigned long flags, + resource_size_t *max); + #endif /* __CXL_MEM_H__ */ diff --git a/drivers/net/ethernet/sfc/efx_cxl.c b/drivers/net/ethernet/sfc/efx_cxl.c index 2cf4837ddfc1..6d49571ccff7 100644 --- a/drivers/net/ethernet/sfc/efx_cxl.c +++ b/drivers/net/ethernet/sfc/efx_cxl.c @@ -22,6 +22,7 @@ void efx_cxl_init(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; struct efx_cxl *cxl = efx->cxl; + resource_size_t max = 0; struct resource res; u16 dvsec; @@ -74,6 +75,19 @@ void efx_cxl_init(struct efx_nic *efx) if (IS_ERR(cxl->endpoint)) pci_info(pci_dev, "CXL accel acquire endpoint failed"); + cxl->cxlrd = cxl_get_hpa_freespace(cxl->endpoint, 1, + CXL_DECODER_F_RAM | CXL_DECODER_F_TYPE2, + &max); + + if (IS_ERR(cxl->cxlrd)) { + pci_info(pci_dev, "CXL accel get HPA failed"); + goto out; + } + + if (max < EFX_CTPIO_BUFFER_SIZE) + pci_info(pci_dev, "CXL accel not enough free HPA space %llu < %u\n", + max, EFX_CTPIO_BUFFER_SIZE); +out: cxl_release_endpoint(cxl->cxlmd, cxl->endpoint); } diff --git a/include/linux/cxl_accel_mem.h b/include/linux/cxl_accel_mem.h index 701910021df8..f3e77688ffe0 100644 --- a/include/linux/cxl_accel_mem.h +++ 
b/include/linux/cxl_accel_mem.h @@ -6,6 +6,10 @@ #ifndef __CXL_ACCEL_MEM_H #define __CXL_ACCEL_MEM_H +#define CXL_DECODER_F_RAM BIT(0) +#define CXL_DECODER_F_PMEM BIT(1) +#define CXL_DECODER_F_TYPE2 BIT(2) + enum accel_resource{ CXL_ACCEL_RES_DPA, CXL_ACCEL_RES_RAM, @@ -32,4 +36,9 @@ struct cxl_memdev *devm_cxl_add_memdev(struct device *host, struct cxl_port *cxl_acquire_endpoint(struct cxl_memdev *cxlmd); void cxl_release_endpoint(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); + +struct cxl_root_decoder *cxl_get_hpa_freespace(struct cxl_port *endpoint, + int interleave_ways, + unsigned long flags, + resource_size_t *max); #endif