
[2/2] drivers: remoteproc: xlnx: add sram support

Message ID 20240502231021.370047-3-tanmay.shah@amd.com (mailing list archive)
State Superseded
Series remoteproc: xlnx: Add attach detach ops and sram support

Commit Message

Tanmay Shah May 2, 2024, 11:10 p.m. UTC
The AMD-Xilinx ZynqMP platform contains on-chip SRAM memory (OCM).
The R5 cores can access OCM; accesses are faster than DDR memory but
slower than the TCM memories available to the cores. The SRAM region
can optionally have multiple power-domains.

Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
---
 drivers/remoteproc/xlnx_r5_remoteproc.c | 221 +++++++++++++++++++++++-
 1 file changed, 220 insertions(+), 1 deletion(-)

Comments

kernel test robot May 5, 2024, 11:47 p.m. UTC | #1
Hi Tanmay,

kernel test robot noticed the following build warnings:

[auto build test WARNING on 0496190c4d42965acb31b9da1b6dac3509791062]

url:    https://github.com/intel-lab-lkp/linux/commits/Tanmay-Shah/drivers-remoteproc-xlnx-add-attach-detach-support/20240503-071225
base:   0496190c4d42965acb31b9da1b6dac3509791062
patch link:    https://lore.kernel.org/r/20240502231021.370047-3-tanmay.shah%40amd.com
patch subject: [PATCH 2/2] drivers: remoteproc: xlnx: add sram support
config: arm64-randconfig-r113-20240506 (https://download.01.org/0day-ci/archive/20240506/202405060759.yyzLUQXP-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240506/202405060759.yyzLUQXP-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202405060759.yyzLUQXP-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
   drivers/remoteproc/xlnx_r5_remoteproc.c:423:20: sparse: sparse: cast removes address space '__iomem' of expression
   drivers/remoteproc/xlnx_r5_remoteproc.c:604:20: sparse: sparse: cast removes address space '__iomem' of expression
   drivers/remoteproc/xlnx_r5_remoteproc.c:827:21: sparse: sparse: incorrect type in assignment (different address spaces) @@     expected struct rsc_tbl_data *rsc_data_va @@     got void [noderef] __iomem * @@
   drivers/remoteproc/xlnx_r5_remoteproc.c:827:21: sparse:     expected struct rsc_tbl_data *rsc_data_va
   drivers/remoteproc/xlnx_r5_remoteproc.c:827:21: sparse:     got void [noderef] __iomem *
   drivers/remoteproc/xlnx_r5_remoteproc.c:844:18: sparse: sparse: incorrect type in assignment (different address spaces) @@     expected struct resource_table *rsc_addr @@     got void [noderef] __iomem * @@
   drivers/remoteproc/xlnx_r5_remoteproc.c:844:18: sparse:     expected struct resource_table *rsc_addr
   drivers/remoteproc/xlnx_r5_remoteproc.c:844:18: sparse:     got void [noderef] __iomem *
   drivers/remoteproc/xlnx_r5_remoteproc.c:898:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@     expected void volatile [noderef] __iomem *addr @@     got struct resource_table *rsc_tbl_va @@
   drivers/remoteproc/xlnx_r5_remoteproc.c:898:24: sparse:     expected void volatile [noderef] __iomem *addr
   drivers/remoteproc/xlnx_r5_remoteproc.c:898:24: sparse:     got struct resource_table *rsc_tbl_va
>> drivers/remoteproc/xlnx_r5_remoteproc.c:995:26: sparse: sparse: Using plain integer as NULL pointer

vim +995 drivers/remoteproc/xlnx_r5_remoteproc.c

   798	
   799	static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
   800	{
   801		struct device *dev = r5_core->dev;
   802		struct rsc_tbl_data *rsc_data_va;
   803		struct resource_table *rsc_addr;
   804		struct resource res_mem;
   805		struct device_node *np;
   806		int ret;
   807	
   808		/**
   809		 * It is expected from remote processor firmware to provide resource
   810		 * table address via struct rsc_tbl_data data structure.
   811		 * Start address of first entry under "memory-region" property list
   812		 * contains that data structure which holds resource table address, size
   813		 * and some magic number to validate correct resource table entry.
   814		 */
   815		np = of_parse_phandle(r5_core->np, "memory-region", 0);
   816		if (!np) {
   817			dev_err(dev, "failed to get memory region dev node\n");
   818			return -EINVAL;
   819		}
   820	
   821		ret = of_address_to_resource(np, 0, &res_mem);
   822		if (ret) {
   823			dev_err(dev, "failed to get memory-region resource addr\n");
   824			return -EINVAL;
   825		}
   826	
 > 827		rsc_data_va = devm_ioremap_wc(dev, res_mem.start,
   828					      sizeof(struct rsc_tbl_data));
   829		if (!rsc_data_va) {
   830			dev_err(dev, "failed to map resource table data address\n");
   831			return -EIO;
   832		}
   833	
   834		/**
   835		 * If RSC_TBL_XLNX_MAGIC number and its complement isn't found then
   836		 * do not consider resource table address valid and don't attach
   837		 */
   838		if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
   839		    rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
   840			dev_dbg(dev, "invalid magic number, won't attach\n");
   841			return -EINVAL;
   842		}
   843	
   844		rsc_addr = ioremap_wc(rsc_data_va->rsc_tbl,
   845				      rsc_data_va->rsc_tbl_size);
   846		if (!rsc_addr) {
   847			dev_err(dev, "failed to get rsc_addr\n");
   848			return -EINVAL;
   849		}
   850	
   851		/**
   852		 * As of now resource table version 1 is expected. Don't fail to attach
   853		 * but warn users about it.
   854		 */
   855		if (rsc_addr->ver != 1)
   856			dev_warn(dev, "unexpected resource table version %d\n",
   857				 rsc_addr->ver);
   858	
   859		r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
   860		r5_core->rsc_tbl_va = rsc_addr;
   861	
   862		return 0;
   863	}
   864	
   865	static int zynqmp_r5_attach(struct rproc *rproc)
   866	{
   867		struct zynqmp_r5_core *r5_core = rproc->priv;
   868		int i, pm_domain_id, ret;
   869	
   870		/*
   871		 * Firmware is loaded in TCM. Request TCM power domains to notify
   872		 * platform management controller that TCM is in use. This will be
   873		 * released during unprepare callback.
   874		 */
   875		for (i = 0; i < r5_core->tcm_bank_count; i++) {
   876			pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
   877			ret = zynqmp_pm_request_node(pm_domain_id,
   878						     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
   879						     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
   880			if (ret < 0)
   881				pr_warn("TCM %d can't be requested\n", i);
   882		}
   883	
   884		return 0;
   885	}
   886	
   887	static int zynqmp_r5_detach(struct rproc *rproc)
   888	{
   889		struct zynqmp_r5_core *r5_core = rproc->priv;
   890	
   891		/*
   892		 * Generate last notification to remote after clearing virtio flag.
   893		 * Remote can avoid polling on virtio reset flag if kick is generated
   894		 * during detach by host and check virtio reset flag on kick interrupt.
   895		 */
   896		zynqmp_r5_rproc_kick(rproc, 0);
   897	
   898		iounmap(r5_core->rsc_tbl_va);
   899		r5_core->rsc_tbl_va = NULL;
   900	
   901		return 0;
   902	}
   903	
   904	static const struct rproc_ops zynqmp_r5_rproc_ops = {
   905		.prepare	= zynqmp_r5_rproc_prepare,
   906		.unprepare	= zynqmp_r5_rproc_unprepare,
   907		.start		= zynqmp_r5_rproc_start,
   908		.stop		= zynqmp_r5_rproc_stop,
   909		.load		= rproc_elf_load_segments,
   910		.parse_fw	= zynqmp_r5_parse_fw,
   911		.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
   912		.sanity_check	= rproc_elf_sanity_check,
   913		.get_boot_addr	= rproc_elf_get_boot_addr,
   914		.kick		= zynqmp_r5_rproc_kick,
   915		.get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
   916		.attach		= zynqmp_r5_attach,
   917		.detach		= zynqmp_r5_detach,
   918	};
   919	
   920	/**
   921	 * zynqmp_r5_add_rproc_core()
   922	 * Allocate and add struct rproc object for each r5f core
   923	 * This is called for each individual r5f core
   924	 *
   925	 * @cdev: Device node of each r5 core
   926	 *
   927	 * Return: zynqmp_r5_core object for success else error code pointer
   928	 */
   929	static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
   930	{
   931		struct zynqmp_r5_core *r5_core;
   932		struct rproc *r5_rproc;
   933		int ret;
   934	
   935		/* Set up DMA mask */
   936		ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
   937		if (ret)
   938			return ERR_PTR(ret);
   939	
   940		/* Allocate remoteproc instance */
   941		r5_rproc = rproc_alloc(cdev, dev_name(cdev),
   942				       &zynqmp_r5_rproc_ops,
   943				       NULL, sizeof(struct zynqmp_r5_core));
   944		if (!r5_rproc) {
   945			dev_err(cdev, "failed to allocate memory for rproc instance\n");
   946			return ERR_PTR(-ENOMEM);
   947		}
   948	
   949		rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);
   950	
   951		r5_rproc->auto_boot = false;
   952		r5_core = r5_rproc->priv;
   953		r5_core->dev = cdev;
   954		r5_core->np = dev_of_node(cdev);
   955		if (!r5_core->np) {
   956			dev_err(cdev, "can't get device node for r5 core\n");
   957			ret = -EINVAL;
   958			goto free_rproc;
   959		}
   960	
   961		/* Add R5 remoteproc core */
   962		ret = rproc_add(r5_rproc);
   963		if (ret) {
   964			dev_err(cdev, "failed to add r5 remoteproc\n");
   965			goto free_rproc;
   966		}
   967	
   968		/*
   969		 * Move rproc state to DETACHED to give one time opportunity to attach
   970		 * if firmware is already available in the memory. This can happen if
   971		 * firmware is loaded via debugger or by any other agent in the system.
   972		 * If firmware isn't available in the memory and resource table isn't found,
   973		 * then rproc state stay OFFLINE.
   974		 */
   975		if (!zynqmp_r5_get_rsc_table_va(r5_core))
   976			r5_rproc->state = RPROC_DETACHED;
   977	
   978		r5_core->rproc = r5_rproc;
   979		return r5_core;
   980	
   981	free_rproc:
   982		rproc_free(r5_rproc);
   983		return ERR_PTR(ret);
   984	}
   985	
   986	static int zynqmp_r5_get_sram_pd(struct device *r5_core_dev,
   987					 struct device_node *sram_np, int **power_domains,
   988					 int *num_pd)
   989	{
   990		struct of_phandle_args out_args;
   991		int pd_count, i, ret;
   992		int *pd_list;
   993	
   994		if (!of_find_property(sram_np, "power-domains", NULL)) {
 > 995			num_pd = 0;
   996			return 0;
   997		}
   998	
   999		pd_count = of_count_phandle_with_args(sram_np, "power-domains",
  1000						      "#power-domain-cells");
  1001	
  1002		pd_list = devm_kcalloc(r5_core_dev, pd_count, sizeof(int), GFP_KERNEL);
  1003		if (!pd_list)
  1004			return -ENOMEM;
  1005	
  1006		for (i = 0; i < pd_count; i++) {
  1007			ret = of_parse_phandle_with_args(sram_np, "power-domains",
  1008							 "#power-domain-cells",
  1009							 i, &out_args);
  1010			if (ret) {
  1011				dev_err(r5_core_dev, "%s: power-domains idx %d parsing failed\n",
  1012					sram_np->name, i);
  1013				return ret;
  1014			}
  1015	
  1016			of_node_put(out_args.np);
  1017			pd_list[i] = out_args.args[0];
  1018		}
  1019	
  1020		*power_domains = pd_list;
  1021		*num_pd = pd_count;
  1022	
  1023		return 0;
  1024	}
  1025
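
Taken together, the sparse complaints above reduce to two separate
problems. First, the resource-table pointers are assigned directly from
ioremap-style calls, so the __iomem address space is silently cast away
(lines 423, 604, 827, 844 and 898). Second, at line 995 the statement
"num_pd = 0;" sets the pointer parameter itself to NULL instead of
zeroing the count it points to; that one is addressed by writing
"*num_pd = 0;". For the address-space warnings, one possible and purely
illustrative shape is to keep the mapping annotated and copy the small
header out of it before touching any fields. The struct layout below is
guessed from the accesses in zynqmp_r5_get_rsc_table_va(); the actual
definition is introduced earlier in this series.

#include <linux/device.h>
#include <linux/io.h>
#include <linux/types.h>

/* Layout implied by the accesses above; field order and widths are assumed. */
struct rsc_tbl_data {
	u32 magic_num;
	u32 comp_magic_num;
	u32 rsc_tbl_size;
	u64 rsc_tbl;
} __packed;

static int read_rsc_tbl_header(struct device *dev, resource_size_t pa,
			       struct rsc_tbl_data *hdr)
{
	void __iomem *va;	/* keep the address-space annotation */

	va = devm_ioremap_wc(dev, pa, sizeof(*hdr));
	if (!va)
		return -EIO;

	/*
	 * Copy the header out of the I/O mapping rather than dereferencing
	 * the __iomem pointer directly, which is what sparse objects to.
	 */
	memcpy_fromio(hdr, va, sizeof(*hdr));
	return 0;
}

The second mapping (the table itself) still ends up stored in a plain
struct resource_table pointer for the remoteproc core, so whether to
carry __iomem all the way through or to document the conversion with an
explicit __force cast is a judgment call left for the next revision.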

Patch

diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index af7aff5e9098..47c08b013152 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -56,6 +56,21 @@  struct mem_bank_data {
 	char *bank_name;
 };
 
+/**
+ * struct zynqmp_sram_bank - sram bank description
+ *
+ * @sram_res: sram address region information
+ * @power_domains: Array of pm domain id
+ * @num_pd: total pm domain id count
+ * @da: device address of sram
+ */
+struct zynqmp_sram_bank {
+	struct resource sram_res;
+	int *power_domains;
+	int num_pd;
+	u32 da;
+};
+
 /**
  * struct mbox_info
  *
@@ -109,6 +124,8 @@  static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
  * struct zynqmp_r5_core
  *
  * @rsc_tbl_va: resource table virtual address
+ * @sram: Array of sram memories assigned to this core
+ * @num_sram: number of sram for this core
  * @dev: device of RPU instance
  * @np: device node of RPU instance
  * @tcm_bank_count: number TCM banks accessible to this RPU
@@ -120,6 +137,8 @@  static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
  */
 struct zynqmp_r5_core {
 	struct resource_table *rsc_tbl_va;
+	struct zynqmp_sram_bank **sram;
+	int num_sram;
 	struct device *dev;
 	struct device_node *np;
 	int tcm_bank_count;
@@ -483,6 +502,69 @@  static int add_mem_regions_carveout(struct rproc *rproc)
 	return 0;
 }
 
+static int add_sram_carveouts(struct rproc *rproc)
+{
+	struct zynqmp_r5_core *r5_core = rproc->priv;
+	struct rproc_mem_entry *rproc_mem;
+	struct zynqmp_sram_bank *sram;
+	dma_addr_t dma_addr;
+	int da, i, j, ret;
+	size_t len;
+
+	for (i = 0; i < r5_core->num_sram; i++) {
+		sram = r5_core->sram[i];
+
+		dma_addr = (dma_addr_t)sram->sram_res.start;
+		len = resource_size(&sram->sram_res);
+		da = sram->da;
+
+		for (j = 0; j < sram->num_pd; j++) {
+			ret = zynqmp_pm_request_node(sram->power_domains[j],
+						     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+						     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+			if (ret < 0) {
+				dev_err(r5_core->dev,
+					"failed to request on SRAM pd 0x%x",
+					sram->power_domains[j]);
+				goto fail_sram;
+			} else {
+				pr_err("sram pd 0x%x request success\n",
+				       sram->power_domains[j]);
+			}
+		}
+
+		/* Register associated reserved memory regions */
+		rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
+						 (dma_addr_t)dma_addr,
+						 len, da,
+						 zynqmp_r5_mem_region_map,
+						 zynqmp_r5_mem_region_unmap,
+						 sram->sram_res.name);
+
+		rproc_add_carveout(rproc, rproc_mem);
+		rproc_coredump_add_segment(rproc, da, len);
+
+		dev_err(&rproc->dev, "sram carveout %s addr=%llx, da=0x%x, size=0x%lx",
+			sram->sram_res.name, dma_addr, da, len);
+	}
+
+	return 0;
+
+fail_sram:
+	/* Release current sram pd. */
+	while (--j >= 0)
+		zynqmp_pm_release_node(sram->power_domains[j]);
+
+	/* Release previously requested sram pd. */
+	while (--i >= 0) {
+		sram = r5_core->sram[i];
+		for (j = 0; j < sram->num_pd; j++)
+			zynqmp_pm_release_node(sram->power_domains[j]);
+	}
+
+	return ret;
+}
+
 /*
  * tcm_mem_unmap()
  * @rproc: single R5 core's corresponding rproc instance
@@ -659,6 +741,12 @@  static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
 		return ret;
 	}
 
+	ret = add_sram_carveouts(rproc);
+	if (ret) {
+		dev_err(&rproc->dev, "failed to get sram carveout %d\n", ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -673,8 +761,9 @@  static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
 static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
 {
 	struct zynqmp_r5_core *r5_core;
+	struct zynqmp_sram_bank *sram;
 	u32 pm_domain_id;
-	int i;
+	int i, j;
 
 	r5_core = rproc->priv;
 
@@ -685,6 +774,13 @@  static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
 				 "can't turn off TCM bank 0x%x", pm_domain_id);
 	}
 
+	/* Release sram power-domains. */
+	for (i = 0; i < r5_core->num_sram; i++) {
+		sram = r5_core->sram[i];
+		for (j = 0; j < sram->num_pd; j++)
+			zynqmp_pm_release_node(sram->power_domains[j]);
+	}
+
 	return 0;
 }
 
@@ -887,6 +983,123 @@  static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
 	return ERR_PTR(ret);
 }
 
+static int zynqmp_r5_get_sram_pd(struct device *r5_core_dev,
+				 struct device_node *sram_np, int **power_domains,
+				 int *num_pd)
+{
+	struct of_phandle_args out_args;
+	int pd_count, i, ret;
+	int *pd_list;
+
+	if (!of_find_property(sram_np, "power-domains", NULL)) {
+		num_pd = 0;
+		return 0;
+	}
+
+	pd_count = of_count_phandle_with_args(sram_np, "power-domains",
+					      "#power-domain-cells");
+
+	pd_list = devm_kcalloc(r5_core_dev, pd_count, sizeof(int), GFP_KERNEL);
+	if (!pd_list)
+		return -ENOMEM;
+
+	for (i = 0; i < pd_count; i++) {
+		ret = of_parse_phandle_with_args(sram_np, "power-domains",
+						 "#power-domain-cells",
+						 i, &out_args);
+		if (ret) {
+			dev_err(r5_core_dev, "%s: power-domains idx %d parsing failed\n",
+				sram_np->name, i);
+			return ret;
+		}
+
+		of_node_put(out_args.np);
+		pd_list[i] = out_args.args[0];
+	}
+
+	*power_domains = pd_list;
+	*num_pd = pd_count;
+
+	return 0;
+}
+
+static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core *r5_core)
+{
+	struct zynqmp_sram_bank **sram, *sram_data;
+	struct device_node *np = r5_core->np;
+	struct device *dev = r5_core->dev;
+	struct device_node *sram_np;
+	int num_sram, i, ret;
+	u64 abs_addr, size;
+
+	num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+	if (num_sram <= 0) {
+		dev_err(dev, "Invalid sram property, ret = %d\n",
+			num_sram);
+		return -EINVAL;
+	}
+
+	sram = devm_kcalloc(dev, num_sram,
+			    sizeof(struct zynqmp_sram_bank *), GFP_KERNEL);
+	if (!sram)
+		return -ENOMEM;
+
+	for (i = 0; i < num_sram; i++) {
+		sram_data = devm_kzalloc(dev, sizeof(struct zynqmp_sram_bank),
+					 GFP_KERNEL);
+		if (!sram_data)
+			return -ENOMEM;
+
+		sram_np = of_parse_phandle(np, "sram", i);
+		if (!sram_np) {
+			dev_err(dev, "failed to get sram %d phandle\n", i);
+			return -EINVAL;
+		}
+
+		if (!of_device_is_available(sram_np)) {
+			of_node_put(sram_np);
+			dev_err(dev, "sram device not available\n");
+			return -EINVAL;
+		}
+
+		ret = of_address_to_resource(sram_np, 0, &sram_data->sram_res);
+		of_node_put(sram_np);
+		if (ret) {
+			dev_err(dev, "addr to res failed\n");
+			return ret;
+		}
+
+		/* Get SRAM device address */
+		ret = of_property_read_reg(sram_np, i, &abs_addr, &size);
+		if (ret) {
+			dev_err(dev, "failed to get reg property\n");
+			return ret;
+		}
+
+		sram_data->da = (u32)abs_addr;
+
+		ret = zynqmp_r5_get_sram_pd(r5_core->dev, sram_np,
+					    &sram_data->power_domains,
+					    &sram_data->num_pd);
+		if (ret) {
+			dev_err(dev, "failed to get power-domains for %d sram\n", i);
+			return ret;
+		}
+
+		sram[i] = sram_data;
+
+		dev_dbg(dev, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx, num_pd=%d\n",
+			i, sram[i]->sram_res.name, sram[i]->sram_res.start,
+			sram[i]->da, resource_size(&sram[i]->sram_res),
+			sram[i]->num_pd);
+	}
+
+	r5_core->sram = sram;
+	r5_core->num_sram = num_sram;
+
+	return 0;
+}
+
 static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
 {
 	int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
@@ -1101,6 +1314,12 @@  static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
 				return ret;
 			}
 		}
+
+		if (of_find_property(r5_core->np, "sram", NULL)) {
+			ret = zynqmp_r5_get_sram_banks(r5_core);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
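
A few details of the new SRAM parsing code stand out on top of the
robot report: in zynqmp_r5_get_sram_banks() the reference taken on
sram_np is dropped with of_node_put() before of_property_read_reg() is
called on that node; the result of of_count_phandle_with_args() is not
checked before it is passed to devm_kcalloc(); rproc_mem_entry_init()
can return NULL but is handed straight to rproc_add_carveout(); and two
informational prints (the "request success" message and the carveout
summary) go out at error level. The helper below is a hypothetical
sketch of the per-bank parsing only, not the author's follow-up; it
reads everything that needs sram_np while the reference is still held,
and struct zynqmp_sram_bank is the one added by the patch above.

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/types.h>

static int zynqmp_r5_parse_sram_bank(struct device_node *np, int index,
				     struct zynqmp_sram_bank *bank)
{
	struct device_node *sram_np;
	u64 abs_addr, size;
	int ret;

	sram_np = of_parse_phandle(np, "sram", index);
	if (!sram_np)
		return -EINVAL;

	if (!of_device_is_available(sram_np)) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = of_address_to_resource(sram_np, 0, &bank->sram_res);
	if (ret)
		goto out_put;

	/* reg index 0 is assumed here; the patch uses the loop index */
	ret = of_property_read_reg(sram_np, 0, &abs_addr, &size);
	if (ret)
		goto out_put;

	bank->da = (u32)abs_addr;

out_put:
	of_node_put(sram_np);	/* single put, after the last use of sram_np */
	return ret;
}

The power-domain helper has the same shape of issue flagged by the
robot: checking the of_count_phandle_with_args() result before the
allocation and writing *num_pd rather than num_pd would cover both the
possible negative count and the NULL-pointer warning at line 995.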