[v4] remoteproc: xlnx: add sram support

Message ID 20240819170937.3666806-1-tanmay.shah@amd.com (mailing list archive)
State Superseded

Commit Message

Tanmay Shah Aug. 19, 2024, 5:09 p.m. UTC
The AMD-Xilinx ZynqMP platform contains on-chip SRAM memory (OCM).
The R5 cores can access OCM, and access is faster than DDR memory but
slower than the available TCM memories. An SRAM region can optionally
have multiple power-domains; the platform management firmware is
responsible for operating these power-domains.

Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
---

Changes in v4:
  - Free previously allocated genpools if adding a carveout fails for
    any sram.
  - Add a comment about the sram size used in creating carveouts.

Changes in v3:
  - make @sram an array rather than an array of pointers
  - fix of_node_put usage to maintain proper refcount of node
  - s/proprty/property
  - Use gen pool framework for mapping sram address space.

Changes in v2:
  - Expand commit message with power-domains related information.

 drivers/remoteproc/xlnx_r5_remoteproc.c | 165 ++++++++++++++++++++++++
 1 file changed, 165 insertions(+)


base-commit: 1a491aaf1d1ce3a1cf5190394c36f21d805c7e96

Comments

Mathieu Poirier Aug. 26, 2024, 4:22 p.m. UTC | #1
Good morning,

First and foremost the overall structure of your code has improved immensely and
I commend you for that.

On Mon, Aug 19, 2024 at 10:09:38AM -0700, Tanmay Shah wrote:
> [...]
>
> +static int add_sram_carveouts(struct rproc *rproc)
> +{
> +	struct zynqmp_r5_core *r5_core = rproc->priv;
> +	struct rproc_mem_entry *rproc_mem;
> +	struct zynqmp_sram_bank *sram;
> +	size_t len, pool_size;
> +	dma_addr_t dma_addr;
> +	int da, i;
> +
> +	for (i = 0; i < r5_core->num_sram; i++) {
> +		sram = &r5_core->sram[i];
> +
> +		dma_addr = (dma_addr_t)sram->sram_res.start;
> +
> +		/* Use actual resource size, as genpool size can be rounded up */
> +		len = resource_size(&sram->sram_res);
> +		da = sram->da;
> +
> +		pool_size = gen_pool_size(sram[i].sram_pool);
> +		sram->va = (void __iomem *)gen_pool_alloc(sram->sram_pool, pool_size);

The genpool subsystem API is used to allocate the genpool but other than
being freed in zynqmp_r5_rproc_unprepare(), nothing is done with the genpool.
Do you have plans to change that in an upcoming patchset?  If not please remove.

Thanks,
Mathieu
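
For context, gen_pool_alloc() is more typically used to hand out small
sub-allocations from a shared pool, which is what Mathieu is getting at.
A minimal sketch of that conventional usage (illustrative only, not code
from this patch; assumes <linux/genalloc.h> and <linux/sizes.h>):

	static int example_use_sram(struct device *dev)
	{
		struct gen_pool *pool;
		void __iomem *buf;

		pool = of_gen_pool_get(dev->of_node, "sram", 0);
		if (!pool)
			return -ENODEV;

		/* carve a 4 KiB buffer out of the shared pool */
		buf = (void __iomem *)gen_pool_alloc(pool, SZ_4K);
		if (!buf)
			return -ENOMEM;

		/* ... read/write through buf ... */

		gen_pool_free(pool, (unsigned long)buf, SZ_4K);
		return 0;
	}

Allocating the entire pool in one call, as the patch does, only reserves
the region; the same mapping could be obtained with ioremap alone.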

Tanmay Shah Aug. 27, 2024, 10:20 p.m. UTC | #2
On 8/26/24 11:22 AM, Mathieu Poirier wrote:
> Good morning,
> 
> First and foremost the overall structure of your code has improved immensely and
> I commend you for that.
> 

Thanks! I hope to get better.

> On Mon, Aug 19, 2024 at 10:09:38AM -0700, Tanmay Shah wrote:
>> AMD-Xilinx zynqmp platform contains on-chip sram memory (OCM).
>> R5 cores can access OCM and access is faster than DDR memory but slower
>> than TCM memories available. Sram region can have optional multiple
>> power-domains. Platform management firmware is responsible
>> to operate these power-domains.
>> 
>> Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
>> ---
>> 
>> Changes in v4:
>>   - Free previously allocalted genpool if adding carveouts fail for any
>>     sram.
>>   - add comment about sram size used in creating carveouts.
>> 
>> Changes in v3:
>>   - make @sram an array rather than an array of pointers
>>   - fix of_node_put usage to maintain proper refcount of node
>>   - s/proprty/property
>>   - Use gen pool framework for mapping sram address space.
>> 
>> Changes in v2:
>>   - Expand commit message with power-domains related information.
>> 
>>  drivers/remoteproc/xlnx_r5_remoteproc.c | 165 ++++++++++++++++++++++++
>>  1 file changed, 165 insertions(+)
>> 
>> diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
>> index 2cea97c746fd..6d2ac7b85c8a 100644
>> --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
>> +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
>> @@ -7,6 +7,7 @@
>>  #include <dt-bindings/power/xlnx-zynqmp-power.h>
>>  #include <linux/dma-mapping.h>
>>  #include <linux/firmware/xlnx-zynqmp.h>
>> +#include <linux/genalloc.h>
>>  #include <linux/kernel.h>
>>  #include <linux/mailbox_client.h>
>>  #include <linux/mailbox/zynqmp-ipi-message.h>
>> @@ -56,6 +57,21 @@ struct mem_bank_data {
>>  	char *bank_name;
>>  };
>>  
>> +/**
>> + * struct zynqmp_sram_bank - sram bank description
>> + *
>> + * @sram_pool: gen pool for his sram
>> + * @sram_res: sram address region information
>> + * @va: virtual address of allocated genpool
>> + * @da: device address of sram
>> + */
>> +struct zynqmp_sram_bank {
>> +	struct gen_pool *sram_pool;
>> +	struct resource sram_res;
>> +	void __iomem *va;
>> +	u32 da;
>> +};
>> +
>>  /**
>>   * struct mbox_info
>>   *
>> @@ -120,6 +136,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
>>   * struct zynqmp_r5_core
>>   *
>>   * @rsc_tbl_va: resource table virtual address
>> + * @sram: Array of sram memories assigned to this core
>> + * @num_sram: number of sram for this core
>>   * @dev: device of RPU instance
>>   * @np: device node of RPU instance
>>   * @tcm_bank_count: number TCM banks accessible to this RPU
>> @@ -131,6 +149,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
>>   */
>>  struct zynqmp_r5_core {
>>  	void __iomem *rsc_tbl_va;
>> +	struct zynqmp_sram_bank *sram;
>> +	int num_sram;
>>  	struct device *dev;
>>  	struct device_node *np;
>>  	int tcm_bank_count;
>> @@ -494,6 +514,56 @@ static int add_mem_regions_carveout(struct rproc *rproc)
>>  	return 0;
>>  }
>>  
>> +static int add_sram_carveouts(struct rproc *rproc)
>> +{
>> +	struct zynqmp_r5_core *r5_core = rproc->priv;
>> +	struct rproc_mem_entry *rproc_mem;
>> +	struct zynqmp_sram_bank *sram;
>> +	size_t len, pool_size;
>> +	dma_addr_t dma_addr;
>> +	int da, i;
>> +
>> +	for (i = 0; i < r5_core->num_sram; i++) {
>> +		sram = &r5_core->sram[i];
>> +
>> +		dma_addr = (dma_addr_t)sram->sram_res.start;
>> +
>> +		/* Use actual resource size, as genpool size can be rounded up */
>> +		len = resource_size(&sram->sram_res);
>> +		da = sram->da;
>> +
>> +		pool_size = gen_pool_size(sram[i].sram_pool);
>> +		sram->va = (void __iomem *)gen_pool_alloc(sram->sram_pool, pool_size);
> 
> The genpool subsystem API is used to allocate the genpool but other than
> being freed in zynqmp_r5_rproc_unprepare(), nothing is done with the genpool.

The gen_pool_alloc API returns a virtual address, and that address is used
to read/write the sram memory. I think that is the genpool framework's
expected usage.
I am okay to use the ioremap APIs instead (like other drivers do).
For now I don't see any problem with using the ioremap_* APIs.
In the future, if any conflict arises with the sram driver, I will refactor
as needed.

I will refactor v5 accordingly.

Thanks.

> Do you have plans to change that in an upcoming patchset?  If not please remove.
> 
> Thanks,
> Mathieu
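
For reference, a minimal sketch of the ioremap-based variant Tanmay
describes might look like this (hypothetical, not the actual v5 code; the
choice of devm_ioremap_wc() for the mapping is an assumption):

	static int add_sram_carveouts(struct rproc *rproc)
	{
		struct zynqmp_r5_core *r5_core = rproc->priv;
		struct rproc_mem_entry *rproc_mem;
		struct zynqmp_sram_bank *sram;
		size_t len;
		int i;

		for (i = 0; i < r5_core->num_sram; i++) {
			sram = &r5_core->sram[i];
			len = resource_size(&sram->sram_res);

			/* map the bank directly; devm_* unmaps on detach */
			sram->va = devm_ioremap_wc(r5_core->dev,
						   sram->sram_res.start, len);
			if (!sram->va)
				return -ENOMEM;

			rproc_mem = rproc_mem_entry_init(&rproc->dev, sram->va,
							 (dma_addr_t)sram->sram_res.start,
							 len, sram->da, NULL, NULL,
							 sram->sram_res.name);
			if (!rproc_mem)
				return -ENOMEM;

			rproc_add_carveout(rproc, rproc_mem);
			rproc_coredump_add_segment(rproc, sram->da, len);
		}

		return 0;
	}

Since devm-managed mappings are released automatically on driver detach,
this variant would also remove the gen_pool_free() unwind logic from both
the failure path and zynqmp_r5_rproc_unprepare().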

Patch

diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 2cea97c746fd..6d2ac7b85c8a 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -7,6 +7,7 @@ 
 #include <dt-bindings/power/xlnx-zynqmp-power.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/genalloc.h>
 #include <linux/kernel.h>
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/zynqmp-ipi-message.h>
@@ -56,6 +57,21 @@  struct mem_bank_data {
 	char *bank_name;
 };
 
+/**
+ * struct zynqmp_sram_bank - sram bank description
+ *
+ * @sram_pool: gen pool for this sram
+ * @sram_res: sram address region information
+ * @va: virtual address of the genpool allocation
+ * @da: device address of sram
+ */
+struct zynqmp_sram_bank {
+	struct gen_pool *sram_pool;
+	struct resource sram_res;
+	void __iomem *va;
+	u32 da;
+};
+
 /**
  * struct mbox_info
  *
@@ -120,6 +136,8 @@  static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
  * struct zynqmp_r5_core
  *
  * @rsc_tbl_va: resource table virtual address
+ * @sram: Array of sram memories assigned to this core
+ * @num_sram: number of sram for this core
  * @dev: device of RPU instance
  * @np: device node of RPU instance
  * @tcm_bank_count: number TCM banks accessible to this RPU
@@ -131,6 +149,8 @@  static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
  */
 struct zynqmp_r5_core {
 	void __iomem *rsc_tbl_va;
+	struct zynqmp_sram_bank *sram;
+	int num_sram;
 	struct device *dev;
 	struct device_node *np;
 	int tcm_bank_count;
@@ -494,6 +514,56 @@  static int add_mem_regions_carveout(struct rproc *rproc)
 	return 0;
 }
 
+static int add_sram_carveouts(struct rproc *rproc)
+{
+	struct zynqmp_r5_core *r5_core = rproc->priv;
+	struct rproc_mem_entry *rproc_mem;
+	struct zynqmp_sram_bank *sram;
+	size_t len, pool_size;
+	dma_addr_t dma_addr;
+	int da, i;
+
+	for (i = 0; i < r5_core->num_sram; i++) {
+		sram = &r5_core->sram[i];
+
+		dma_addr = (dma_addr_t)sram->sram_res.start;
+
+		/* Use actual resource size, as genpool size can be rounded up */
+		len = resource_size(&sram->sram_res);
+		da = sram->da;
+
+		pool_size = gen_pool_size(sram->sram_pool);
+		sram->va = (void __iomem *)gen_pool_alloc(sram->sram_pool, pool_size);
+		if (!sram->va) {
+			dev_err(r5_core->dev, "failed to alloc sram idx %d pool\n", i);
+			goto fail_add_sram_carveouts;
+		}
+
+		rproc_mem = rproc_mem_entry_init(&rproc->dev, sram->va,
+						 dma_addr, len, da,
+						 NULL, NULL,
+						 sram->sram_res.name);
+		if (!rproc_mem) {
+			/* release this bank before unwinding the others */
+			gen_pool_free(sram->sram_pool,
+				      (unsigned long)sram->va, pool_size);
+			goto fail_add_sram_carveouts;
+		}
+
+		rproc_add_carveout(rproc, rproc_mem);
+		rproc_coredump_add_segment(rproc, da, len);
+
+		dev_dbg(&rproc->dev, "sram carveout %s addr=0x%llx, da=0x%x, size=0x%lx\n",
+			sram->sram_res.name, dma_addr, da, len);
+	}
+
+	return 0;
+
+fail_add_sram_carveouts:
+	/* unwind all banks allocated so far, including index 0 */
+	while (--i >= 0) {
+		pool_size = gen_pool_size(r5_core->sram[i].sram_pool);
+		gen_pool_free(r5_core->sram[i].sram_pool,
+			      (unsigned long)r5_core->sram[i].va, pool_size);
+	}
+
+	return -ENOMEM;
+}
+
 /*
  * tcm_mem_unmap()
  * @rproc: single R5 core's corresponding rproc instance
@@ -669,6 +739,12 @@  static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
 		return ret;
 	}
 
+	ret = add_sram_carveouts(rproc);
+	if (ret) {
+		dev_err(&rproc->dev, "failed to get sram carveout %d\n", ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -695,6 +771,12 @@  static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
 				 "can't turn off TCM bank 0x%x", pm_domain_id);
 	}
 
+	for (i = 0; i < r5_core->num_sram; i++) {
+		gen_pool_free(r5_core->sram[i].sram_pool,
+			      (unsigned long)r5_core->sram[i].va,
+			      gen_pool_size(r5_core->sram[i].sram_pool));
+	}
+
 	return 0;
 }
 
@@ -881,6 +963,85 @@  static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
 	return ERR_PTR(ret);
 }
 
+static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core *r5_core)
+{
+	struct device_node *np = r5_core->np;
+	struct device *dev = r5_core->dev;
+	struct zynqmp_sram_bank *sram;
+	struct device_node *sram_np;
+	int num_sram, i, ret;
+	u64 abs_addr, size;
+
+	/* "sram" is optional property. Do not fail, if unavailable. */
+	if (!of_property_present(r5_core->np, "sram"))
+		return 0;
+
+	num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+	if (num_sram <= 0) {
+		dev_err(dev, "Invalid sram property, ret = %d\n",
+			num_sram);
+		return -EINVAL;
+	}
+
+	sram = devm_kcalloc(dev, num_sram,
+			    sizeof(struct zynqmp_sram_bank), GFP_KERNEL);
+	if (!sram)
+		return -ENOMEM;
+
+	for (i = 0; i < num_sram; i++) {
+		sram_np = of_parse_phandle(np, "sram", i);
+		if (!sram_np) {
+			dev_err(dev, "failed to get sram %d phandle\n", i);
+			ret = -EINVAL;
+			goto fail_sram_get;
+		}
+
+		if (!of_device_is_available(sram_np)) {
+			dev_err(dev, "sram device not available\n");
+			ret = -EINVAL;
+			goto fail_sram_get;
+		}
+
+		ret = of_address_to_resource(sram_np, 0, &sram[i].sram_res);
+		if (ret) {
+			dev_err(dev, "addr to res failed\n");
+			goto fail_sram_get;
+		}
+
+		sram[i].sram_pool = of_gen_pool_get(np, "sram", i);
+		if (!sram[i].sram_pool) {
+			dev_err(dev, "failed to get sram idx %d gen pool\n", i);
+			ret = -ENOMEM;
+			goto fail_sram_get;
+		}
+
+		/* Get SRAM device address from the same "reg" entry used above */
+		ret = of_property_read_reg(sram_np, 0, &abs_addr, &size);
+		if (ret) {
+			dev_err(dev, "failed to get reg property\n");
+			goto fail_sram_get;
+		}
+
+		sram[i].da = (u32)abs_addr;
+
+		of_node_put(sram_np);
+
+		dev_dbg(dev, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx\n",
+			i, sram[i].sram_res.name, sram[i].sram_res.start,
+			sram[i].da, resource_size(&sram[i].sram_res));
+	}
+
+	r5_core->sram = sram;
+	r5_core->num_sram = num_sram;
+
+	return 0;
+
+fail_sram_get:
+	of_node_put(sram_np);
+
+	return ret;
+}
+
 static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
 {
 	int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
@@ -1095,6 +1256,10 @@  static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
 				return ret;
 			}
 		}
+
+		ret = zynqmp_r5_get_sram_banks(r5_core);
+		if (ret)
+			return ret;
 	}
 
 	return 0;