
[02/26] davinci: EDMA: restructure to support multiple channel controllers

Message ID 87hbxlsrbj.fsf@deeprootsystems.com (mailing list archive)
State Not Applicable

Commit Message

Kevin Hilman July 9, 2009, 5:24 p.m. UTC
Kevin Hilman <khilman@deeprootsystems.com> writes:

> Russell King - ARM Linux <linux@arm.linux.org.uk> writes:
>
>> On Mon, Jul 06, 2009 at 02:14:36PM -0700, Kevin Hilman wrote:
>>> From: Sudhakar Rajashekhara <sudhakar.raj@ti.com>
>>> 
>>> Define a structure to store EDMA channel controller based information.
>>> Use platform_device.id to find out the instance being configured in
>>> probe function.
>>
>> Why all this complexity?  Resources are specific to each platform
>> device, so just provide the _specific_ resources for each platform
>> device.
>
> Agreed.  I've asked Sudhakar to take a look at this and rework.
>

OK, Sudhakar reworked this and I've integrated the change into DaVinci
git.  However, reworking this also touched several of the other
EDMA patches in this series in a way that was not easy to merge.

Therefore, since each of the commits was relatively small, I combined
the main EDMA restructuring into a single commit that includes this
new rework and merged the changelogs into an updated summary.  EDMA
support for the newer SoCs (dm646x, dm365 and da8xx) is kept in
separate commits.

Below is the new patch, which is also the 2nd commit in the updated
davinci-next branch:

  git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-davinci.git davinci-next

Kevin



From 98d7d6380f68ea674bfebaeb796db70f130213fb Mon Sep 17 00:00:00 2001
From: Sudhakar Rajashekhara <sudhakar.raj@ti.com>
Date: Thu, 21 May 2009 07:41:35 -0400
Subject: [PATCH 02/25] davinci: EDMA: multiple CCs, channel mapping and API changes

- restructure to support multiple channel controllers by using
  additional struct resources for each CC
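
  For each CC the driver now looks its resources up by name rather
  than by index (this only summarizes the edma_probe() changes below;
  it is not additional behaviour): memory regions are named
  "edma_cc0", "edma_cc1", ..., and the completion and error interrupts
  are named "edma0", "edma0_err", and so on, roughly:

        sprintf(res_name, "edma_cc%d", j);
        r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                            res_name);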

- interface changes visible to EDMA clients

  Introduce macros to build IDs from controller and channel number,
  and to extract them. Modify the edma_alloc_slot function to take an
  extra argument for the controller.

  Also update the ASoC drivers to use the new API.  ASoC changes
  Acked-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
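
  To illustrate the client-visible change (a sketch based only on the
  edma.h macros below, not extra code added by this patch), a driver
  that needs a PaRAM slot on controller 0 now does roughly:

        int slot, ctlr, chan;

        slot = edma_alloc_slot(0, EDMA_SLOT_ANY); /* any free slot on CC 0 */
        ctlr = EDMA_CTLR(slot);             /* upper 16 bits: controller */
        chan = EDMA_CHAN_SLOT(slot);        /* lower 16 bits: channel/slot */

  and DMA channel numbers passed through platform resources are built
  with EDMA_CTLR_CHAN(ctlr, chan), as in the devices.c hunk.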

- Move queue-related mappings to dm<soc>.c

  EDMA on DM355 and DM644x has two transfer controllers, while DM646x
  has four. Moving the queue-to-TC mapping and the queue priority
  mapping into dm<soc>.c lets these mappings be supplied through the
  platform device, so the machine_is_* tests can be avoided.
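
  As a sketch of the resulting per-SoC platform data (this mirrors the
  dm355.c hunk below rather than adding anything new), each dm<soc>.c
  now supplies tables of {queue, value} pairs terminated by {-1, -1}:

        static const s8 queue_tc_mapping[][2] = {
                /* {event queue no, TC no} */
                {0, 0},
                {1, 1},
                {-1, -1},
        };

  which edma_probe() walks instead of using compiled-in constants.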

- add channel mapping logic

  Channel mapping logic is introduced for dm646x EDMA. This means
  there is no longer a fixed association between a channel number and
  a parameter entry number: using the DMA channel mapping registers
  (DCHMAPn), any PaRAM entry can be mapped to any channel. On dm644x
  and dm355, by contrast, the mapping between an EDMA channel and its
  PaRAM entry number remains fixed.
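
  As a rough sketch of the default programming (see map_dmach_param()
  and edma_probe() in the dma.c hunk below), an identity
  channel-to-PaRAM mapping is set up, but only when the CC reports
  that the mapping registers exist:

        if (edma_read(ctlr, EDMA_CCCFG) & CHMAP_EXIST)
                for (i = 0; i < EDMA_MAX_DMACH; i++)
                        /* PaRAM entry number is held from bit 5 up */
                        edma_write_array(ctlr, EDMA_DCHMAP, i, i << 5);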

Signed-off-by: Naresh Medisetty <naresh@ti.com>
Signed-off-by: Sudhakar Rajashekhara <sudhakar.raj@ti.com>
Reviewed-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
---
 arch/arm/mach-davinci/devices.c           |    8 +-
 arch/arm/mach-davinci/dm355.c             |   41 ++-
 arch/arm/mach-davinci/dm644x.c            |   41 ++-
 arch/arm/mach-davinci/dm646x.c            |   46 ++-
 arch/arm/mach-davinci/dma.c               |  811 +++++++++++++++++++----------
 arch/arm/mach-davinci/include/mach/edma.h |    9 +-
 sound/soc/davinci/davinci-evm.c           |    8 +-
 sound/soc/davinci/davinci-pcm.c           |    6 +-
 8 files changed, 642 insertions(+), 328 deletions(-)

Patch

diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index de16f34..7a2f8ae 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -82,10 +82,10 @@  static struct resource mmcsd0_resources[] = {
 	},
 	/* DMA channels: RX, then TX */
 	{
-		.start = DAVINCI_DMA_MMCRXEVT,
+		.start = EDMA_CTLR_CHAN(0, DAVINCI_DMA_MMCRXEVT),
 		.flags = IORESOURCE_DMA,
 	}, {
-		.start = DAVINCI_DMA_MMCTXEVT,
+		.start = EDMA_CTLR_CHAN(0, DAVINCI_DMA_MMCTXEVT),
 		.flags = IORESOURCE_DMA,
 	},
 };
@@ -119,10 +119,10 @@  static struct resource mmcsd1_resources[] = {
 	},
 	/* DMA channels: RX, then TX */
 	{
-		.start = 30,	/* rx */
+		.start = EDMA_CTLR_CHAN(0, 30),	/* rx */
 		.flags = IORESOURCE_DMA,
 	}, {
-		.start = 31,	/* tx */
+		.start = EDMA_CTLR_CHAN(0, 31),	/* tx */
 		.flags = IORESOURCE_DMA,
 	},
 };
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index baaaf32..373f0c4 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -558,17 +558,38 @@  static const s8 dma_chan_dm355_no_event[] = {
 	-1
 };
 
-static struct edma_soc_info dm355_edma_info = {
-	.n_channel	= 64,
-	.n_region	= 4,
-	.n_slot		= 128,
-	.n_tc		= 2,
-	.noevent	= dma_chan_dm355_no_event,
+static const s8
+queue_tc_mapping[][2] = {
+	/* {event queue no, TC no} */
+	{0, 0},
+	{1, 1},
+	{-1, -1},
+};
+
+static const s8
+queue_priority_mapping[][2] = {
+	/* {event queue no, Priority} */
+	{0, 3},
+	{1, 7},
+	{-1, -1},
+};
+
+static struct edma_soc_info dm355_edma_info[] = {
+	{
+		.n_channel		= 64,
+		.n_region		= 4,
+		.n_slot			= 128,
+		.n_tc			= 2,
+		.n_cc			= 1,
+		.noevent		= dma_chan_dm355_no_event,
+		.queue_tc_mapping	= queue_tc_mapping,
+		.queue_priority_mapping	= queue_priority_mapping,
+	},
 };
 
 static struct resource edma_resources[] = {
 	{
-		.name	= "edma_cc",
+		.name	= "edma_cc0",
 		.start	= 0x01c00000,
 		.end	= 0x01c00000 + SZ_64K - 1,
 		.flags	= IORESOURCE_MEM,
@@ -586,10 +607,12 @@  static struct resource edma_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "edma0",
 		.start	= IRQ_CCINT0,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
+		.name	= "edma0_err",
 		.start	= IRQ_CCERRINT,
 		.flags	= IORESOURCE_IRQ,
 	},
@@ -598,8 +621,8 @@  static struct resource edma_resources[] = {
 
 static struct platform_device dm355_edma_device = {
 	.name			= "edma",
-	.id			= -1,
-	.dev.platform_data	= &dm355_edma_info,
+	.id			= 0,
+	.dev.platform_data	= dm355_edma_info,
 	.num_resources		= ARRAY_SIZE(edma_resources),
 	.resource		= edma_resources,
 };
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index fb5449b..d145c0c 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -484,17 +484,38 @@  static const s8 dma_chan_dm644x_no_event[] = {
 	-1
 };
 
-static struct edma_soc_info dm644x_edma_info = {
-	.n_channel	= 64,
-	.n_region	= 4,
-	.n_slot		= 128,
-	.n_tc		= 2,
-	.noevent	= dma_chan_dm644x_no_event,
+static const s8
+queue_tc_mapping[][2] = {
+	/* {event queue no, TC no} */
+	{0, 0},
+	{1, 1},
+	{-1, -1},
+};
+
+static const s8
+queue_priority_mapping[][2] = {
+	/* {event queue no, Priority} */
+	{0, 3},
+	{1, 7},
+	{-1, -1},
+};
+
+static struct edma_soc_info dm644x_edma_info[] = {
+	{
+		.n_channel		= 64,
+		.n_region		= 4,
+		.n_slot			= 128,
+		.n_tc			= 2,
+		.n_cc			= 1,
+		.noevent		= dma_chan_dm644x_no_event,
+		.queue_tc_mapping	= queue_tc_mapping,
+		.queue_priority_mapping	= queue_priority_mapping,
+	},
 };
 
 static struct resource edma_resources[] = {
 	{
-		.name	= "edma_cc",
+		.name	= "edma_cc0",
 		.start	= 0x01c00000,
 		.end	= 0x01c00000 + SZ_64K - 1,
 		.flags	= IORESOURCE_MEM,
@@ -512,10 +533,12 @@  static struct resource edma_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "edma0",
 		.start	= IRQ_CCINT0,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
+		.name	= "edma0_err",
 		.start	= IRQ_CCERRINT,
 		.flags	= IORESOURCE_IRQ,
 	},
@@ -524,8 +547,8 @@  static struct resource edma_resources[] = {
 
 static struct platform_device dm644x_edma_device = {
 	.name			= "edma",
-	.id			= -1,
-	.dev.platform_data	= &dm644x_edma_info,
+	.id			= 0,
+	.dev.platform_data	= dm644x_edma_info,
 	.num_resources		= ARRAY_SIZE(edma_resources),
 	.resource		= edma_resources,
 };
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 334f071..e241073 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -451,17 +451,43 @@  static const s8 dma_chan_dm646x_no_event[] = {
 	-1
 };
 
-static struct edma_soc_info dm646x_edma_info = {
-	.n_channel	= 64,
-	.n_region	= 6,	/* 0-1, 4-7 */
-	.n_slot		= 512,
-	.n_tc		= 4,
-	.noevent	= dma_chan_dm646x_no_event,
+/* Four Transfer Controllers on DM646x */
+static const s8
+dm646x_queue_tc_mapping[][2] = {
+	/* {event queue no, TC no} */
+	{0, 0},
+	{1, 1},
+	{2, 2},
+	{3, 3},
+	{-1, -1},
+};
+
+static const s8
+dm646x_queue_priority_mapping[][2] = {
+	/* {event queue no, Priority} */
+	{0, 4},
+	{1, 0},
+	{2, 5},
+	{3, 1},
+	{-1, -1},
+};
+
+static struct edma_soc_info dm646x_edma_info[] = {
+	{
+		.n_channel		= 64,
+		.n_region		= 6,	/* 0-1, 4-7 */
+		.n_slot			= 512,
+		.n_tc			= 4,
+		.n_cc			= 1,
+		.noevent		= dma_chan_dm646x_no_event,
+		.queue_tc_mapping	= dm646x_queue_tc_mapping,
+		.queue_priority_mapping	= dm646x_queue_priority_mapping,
+	},
 };
 
 static struct resource edma_resources[] = {
 	{
-		.name	= "edma_cc",
+		.name	= "edma_cc0",
 		.start	= 0x01c00000,
 		.end	= 0x01c00000 + SZ_64K - 1,
 		.flags	= IORESOURCE_MEM,
@@ -491,10 +517,12 @@  static struct resource edma_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "edma0",
 		.start	= IRQ_CCINT0,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
+		.name	= "edma0_err",
 		.start	= IRQ_CCERRINT,
 		.flags	= IORESOURCE_IRQ,
 	},
@@ -503,8 +531,8 @@  static struct resource edma_resources[] = {
 
 static struct platform_device dm646x_edma_device = {
 	.name			= "edma",
-	.id			= -1,
-	.dev.platform_data	= &dm646x_edma_info,
+	.id			= 0,
+	.dev.platform_data	= dm646x_edma_info,
 	.num_resources		= ARRAY_SIZE(edma_resources),
 	.resource		= edma_resources,
 };
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 15e9eb1..5908f77 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -100,132 +100,157 @@ 
 #define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
 #define EDMA_PARM	0x4000	/* 128 param entries */
 
-#define DAVINCI_DMA_3PCC_BASE	0x01C00000
-
 #define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
 
+#define EDMA_DCHMAP	0x0100  /* 64 registers */
+#define CHMAP_EXIST	BIT(24)
+
 #define EDMA_MAX_DMACH           64
 #define EDMA_MAX_PARAMENTRY     512
-#define EDMA_MAX_EVQUE            2	/* FIXME too small */
+#define EDMA_MAX_CC               2
 
 
 /*****************************************************************************/
 
-static void __iomem *edmacc_regs_base;
+static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
 
-static inline unsigned int edma_read(int offset)
+static inline unsigned int edma_read(unsigned ctlr, int offset)
 {
-	return (unsigned int)__raw_readl(edmacc_regs_base + offset);
+	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
 }
 
-static inline void edma_write(int offset, int val)
+static inline void edma_write(unsigned ctlr, int offset, int val)
 {
-	__raw_writel(val, edmacc_regs_base + offset);
+	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
 }
-static inline void edma_modify(int offset, unsigned and, unsigned or)
+static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
+		unsigned or)
 {
-	unsigned val = edma_read(offset);
+	unsigned val = edma_read(ctlr, offset);
 	val &= and;
 	val |= or;
-	edma_write(offset, val);
+	edma_write(ctlr, offset, val);
 }
-static inline void edma_and(int offset, unsigned and)
+static inline void edma_and(unsigned ctlr, int offset, unsigned and)
 {
-	unsigned val = edma_read(offset);
+	unsigned val = edma_read(ctlr, offset);
 	val &= and;
-	edma_write(offset, val);
+	edma_write(ctlr, offset, val);
 }
-static inline void edma_or(int offset, unsigned or)
+static inline void edma_or(unsigned ctlr, int offset, unsigned or)
 {
-	unsigned val = edma_read(offset);
+	unsigned val = edma_read(ctlr, offset);
 	val |= or;
-	edma_write(offset, val);
+	edma_write(ctlr, offset, val);
 }
-static inline unsigned int edma_read_array(int offset, int i)
+static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
 {
-	return edma_read(offset + (i << 2));
+	return edma_read(ctlr, offset + (i << 2));
 }
-static inline void edma_write_array(int offset, int i, unsigned val)
+static inline void edma_write_array(unsigned ctlr, int offset, int i,
+		unsigned val)
 {
-	edma_write(offset + (i << 2), val);
+	edma_write(ctlr, offset + (i << 2), val);
 }
-static inline void edma_modify_array(int offset, int i,
+static inline void edma_modify_array(unsigned ctlr, int offset, int i,
 		unsigned and, unsigned or)
 {
-	edma_modify(offset + (i << 2), and, or);
+	edma_modify(ctlr, offset + (i << 2), and, or);
 }
-static inline void edma_or_array(int offset, int i, unsigned or)
+static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
 {
-	edma_or(offset + (i << 2), or);
+	edma_or(ctlr, offset + (i << 2), or);
 }
-static inline void edma_or_array2(int offset, int i, int j, unsigned or)
+static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
+		unsigned or)
 {
-	edma_or(offset + ((i*2 + j) << 2), or);
+	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
 }
-static inline void edma_write_array2(int offset, int i, int j, unsigned val)
+static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
+		unsigned val)
 {
-	edma_write(offset + ((i*2 + j) << 2), val);
+	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
 }
-static inline unsigned int edma_shadow0_read(int offset)
+static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
 {
-	return edma_read(EDMA_SHADOW0 + offset);
+	return edma_read(ctlr, EDMA_SHADOW0 + offset);
 }
-static inline unsigned int edma_shadow0_read_array(int offset, int i)
+static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
+		int i)
 {
-	return edma_read(EDMA_SHADOW0 + offset + (i << 2));
+	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
 }
-static inline void edma_shadow0_write(int offset, unsigned val)
+static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
 {
-	edma_write(EDMA_SHADOW0 + offset, val);
+	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
 }
-static inline void edma_shadow0_write_array(int offset, int i, unsigned val)
+static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
+		unsigned val)
 {
-	edma_write(EDMA_SHADOW0 + offset + (i << 2), val);
+	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
 }
-static inline unsigned int edma_parm_read(int offset, int param_no)
+static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
+		int param_no)
 {
-	return edma_read(EDMA_PARM + offset + (param_no << 5));
+	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
 }
-static inline void edma_parm_write(int offset, int param_no, unsigned val)
+static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
+		unsigned val)
 {
-	edma_write(EDMA_PARM + offset + (param_no << 5), val);
+	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
 }
-static inline void edma_parm_modify(int offset, int param_no,
+static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
 		unsigned and, unsigned or)
 {
-	edma_modify(EDMA_PARM + offset + (param_no << 5), and, or);
+	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
 }
-static inline void edma_parm_and(int offset, int param_no, unsigned and)
+static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
+		unsigned and)
 {
-	edma_and(EDMA_PARM + offset + (param_no << 5), and);
+	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
 }
-static inline void edma_parm_or(int offset, int param_no, unsigned or)
+static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
+		unsigned or)
 {
-	edma_or(EDMA_PARM + offset + (param_no << 5), or);
+	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
 }
 
 /*****************************************************************************/
 
 /* actual number of DMA channels and slots on this silicon */
-static unsigned num_channels;
-static unsigned num_slots;
+struct edma {
+	/* how many dma resources of each type */
+	unsigned	num_channels;
+	unsigned	num_region;
+	unsigned	num_slots;
+	unsigned	num_tc;
+	unsigned	num_cc;
+
+	/* list of channels with no even trigger; terminated by "-1" */
+	const s8	*noevent;
+
+	/* The edma_inuse bit for each PaRAM slot is clear unless the
+	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
+	 */
+	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
 
-static struct dma_interrupt_data {
-	void (*callback)(unsigned channel, unsigned short ch_status,
-			 void *data);
-	void *data;
-} intr_data[EDMA_MAX_DMACH];
+	/* The edma_noevent bit for each channel is clear unless
+	 * it doesn't trigger DMA events on this platform.  It uses a
+	 * bit of SOC-specific initialization code.
+	 */
+	DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH);
 
-/* The edma_inuse bit for each PaRAM slot is clear unless the
- * channel is in use ... by ARM or DSP, for QDMA, or whatever.
- */
-static DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
+	unsigned	irq_res_start;
+	unsigned	irq_res_end;
 
-/* The edma_noevent bit for each channel is clear unless
- * it doesn't trigger DMA events on this platform.  It uses a
- * bit of SOC-specific initialization code.
- */
-static DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH);
+	struct dma_interrupt_data {
+		void (*callback)(unsigned channel, unsigned short ch_status,
+				void *data);
+		void *data;
+	} intr_data[EDMA_MAX_DMACH];
+};
+
+static struct edma *edma_info[EDMA_MAX_CC];
 
 /* dummy param set used to (re)initialize parameter RAM slots */
 static const struct edmacc_param dummy_paramset = {
@@ -233,25 +258,10 @@  static const struct edmacc_param dummy_paramset = {
 	.ccnt = 1,
 };
 
-static const int __initconst
-queue_tc_mapping[EDMA_MAX_EVQUE + 1][2] = {
-/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{-1, -1}
-};
-
-static const int __initconst
-queue_priority_mapping[EDMA_MAX_EVQUE + 1][2] = {
-	/* {event queue no, Priority} */
-	{0, 3},
-	{1, 7},
-	{-1, -1}
-};
-
 /*****************************************************************************/
 
-static void map_dmach_queue(unsigned ch_no, enum dma_event_q queue_no)
+static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
+		enum dma_event_q queue_no)
 {
 	int bit = (ch_no & 0x7) * 4;
 
@@ -260,20 +270,40 @@  static void map_dmach_queue(unsigned ch_no, enum dma_event_q queue_no)
 		queue_no = EVENTQ_1;
 
 	queue_no &= 7;
-	edma_modify_array(EDMA_DMAQNUM, (ch_no >> 3),
+	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
 			~(0x7 << bit), queue_no << bit);
 }
 
-static void __init map_queue_tc(int queue_no, int tc_no)
+static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
 {
 	int bit = queue_no * 4;
-	edma_modify(EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
+	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
 }
 
-static void __init assign_priority_to_queue(int queue_no, int priority)
+static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
+		int priority)
 {
 	int bit = queue_no * 4;
-	edma_modify(EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
+	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
+			((priority & 0x7) << bit));
+}
+
+/**
+ * map_dmach_param - Maps channel number to param entry number
+ *
+ * This maps the dma channel number to param entry numberter. In
+ * other words using the DMA channel mapping registers a param entry
+ * can be mapped to any channel
+ *
+ * Callers are responsible for ensuring the channel mapping logic is
+ * included in that particular EDMA variant (Eg : dm646x)
+ *
+ */
+static void __init map_dmach_param(unsigned ctlr)
+{
+	int i;
+	for (i = 0; i < EDMA_MAX_DMACH; i++)
+		edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
 }
 
 static inline void
@@ -281,22 +311,39 @@  setup_dma_interrupt(unsigned lch,
 	void (*callback)(unsigned channel, u16 ch_status, void *data),
 	void *data)
 {
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(lch);
+	lch = EDMA_CHAN_SLOT(lch);
+
 	if (!callback) {
-		edma_shadow0_write_array(SH_IECR, lch >> 5,
+		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
 				(1 << (lch & 0x1f)));
 	}
 
-	intr_data[lch].callback = callback;
-	intr_data[lch].data = data;
+	edma_info[ctlr]->intr_data[lch].callback = callback;
+	edma_info[ctlr]->intr_data[lch].data = data;
 
 	if (callback) {
-		edma_shadow0_write_array(SH_ICR, lch >> 5,
+		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
 				(1 << (lch & 0x1f)));
-		edma_shadow0_write_array(SH_IESR, lch >> 5,
+		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
 				(1 << (lch & 0x1f)));
 	}
 }
 
+static int irq2ctlr(int irq)
+{
+	if (irq >= edma_info[0]->irq_res_start &&
+		irq <= edma_info[0]->irq_res_end)
+		return 0;
+	else if (irq >= edma_info[1]->irq_res_start &&
+		irq <= edma_info[1]->irq_res_end)
+		return 1;
+
+	return -1;
+}
+
 /******************************************************************************
  *
  * DMA interrupt handler
@@ -305,32 +352,39 @@  setup_dma_interrupt(unsigned lch,
 static irqreturn_t dma_irq_handler(int irq, void *data)
 {
 	int i;
+	unsigned ctlr;
 	unsigned int cnt = 0;
 
+	ctlr = irq2ctlr(irq);
+
 	dev_dbg(data, "dma_irq_handler\n");
 
-	if ((edma_shadow0_read_array(SH_IPR, 0) == 0)
-	    && (edma_shadow0_read_array(SH_IPR, 1) == 0))
+	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0)
+	    && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
 		return IRQ_NONE;
 
 	while (1) {
 		int j;
-		if (edma_shadow0_read_array(SH_IPR, 0))
+		if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
 			j = 0;
-		else if (edma_shadow0_read_array(SH_IPR, 1))
+		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
 			j = 1;
 		else
 			break;
 		dev_dbg(data, "IPR%d %08x\n", j,
-				edma_shadow0_read_array(SH_IPR, j));
+				edma_shadow0_read_array(ctlr, SH_IPR, j));
 		for (i = 0; i < 32; i++) {
 			int k = (j << 5) + i;
-			if (edma_shadow0_read_array(SH_IPR, j) & (1 << i)) {
+			if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
+							(1 << i)) {
 				/* Clear the corresponding IPR bits */
-				edma_shadow0_write_array(SH_ICR, j, (1 << i));
-				if (intr_data[k].callback) {
-					intr_data[k].callback(k, DMA_COMPLETE,
-						intr_data[k].data);
+				edma_shadow0_write_array(ctlr, SH_ICR, j,
+							(1 << i));
+				if (edma_info[ctlr]->intr_data[k].callback) {
+					edma_info[ctlr]->intr_data[k].callback(
+						k, DMA_COMPLETE,
+						edma_info[ctlr]->intr_data[k].
+						data);
 				}
 			}
 		}
@@ -338,7 +392,7 @@  static irqreturn_t dma_irq_handler(int irq, void *data)
 		if (cnt > 10)
 			break;
 	}
-	edma_shadow0_write(SH_IEVAL, 1);
+	edma_shadow0_write(ctlr, SH_IEVAL, 1);
 	return IRQ_HANDLED;
 }
 
@@ -350,78 +404,87 @@  static irqreturn_t dma_irq_handler(int irq, void *data)
 static irqreturn_t dma_ccerr_handler(int irq, void *data)
 {
 	int i;
+	unsigned ctlr;
 	unsigned int cnt = 0;
 
+	ctlr = irq2ctlr(irq);
+
 	dev_dbg(data, "dma_ccerr_handler\n");
 
-	if ((edma_read_array(EDMA_EMR, 0) == 0) &&
-	    (edma_read_array(EDMA_EMR, 1) == 0) &&
-	    (edma_read(EDMA_QEMR) == 0) && (edma_read(EDMA_CCERR) == 0))
+	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
+	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
+	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
+	    (edma_read(ctlr, EDMA_CCERR) == 0))
 		return IRQ_NONE;
 
 	while (1) {
 		int j = -1;
-		if (edma_read_array(EDMA_EMR, 0))
+		if (edma_read_array(ctlr, EDMA_EMR, 0))
 			j = 0;
-		else if (edma_read_array(EDMA_EMR, 1))
+		else if (edma_read_array(ctlr, EDMA_EMR, 1))
 			j = 1;
 		if (j >= 0) {
 			dev_dbg(data, "EMR%d %08x\n", j,
-					edma_read_array(EDMA_EMR, j));
+					edma_read_array(ctlr, EDMA_EMR, j));
 			for (i = 0; i < 32; i++) {
 				int k = (j << 5) + i;
-				if (edma_read_array(EDMA_EMR, j) & (1 << i)) {
+				if (edma_read_array(ctlr, EDMA_EMR, j) &
+							(1 << i)) {
 					/* Clear the corresponding EMR bits */
-					edma_write_array(EDMA_EMCR, j, 1 << i);
+					edma_write_array(ctlr, EDMA_EMCR, j,
+							1 << i);
 					/* Clear any SER */
-					edma_shadow0_write_array(SH_SECR, j,
-							(1 << i));
-					if (intr_data[k].callback) {
-						intr_data[k].callback(k,
-								DMA_CC_ERROR,
-								intr_data
-								[k].data);
+					edma_shadow0_write_array(ctlr, SH_SECR,
+								j, (1 << i));
+					if (edma_info[ctlr]->intr_data[k].
+								callback) {
+						edma_info[ctlr]->intr_data[k].
+						callback(k,
+						DMA_CC_ERROR,
+						edma_info[ctlr]->intr_data
+						[k].data);
 					}
 				}
 			}
-		} else if (edma_read(EDMA_QEMR)) {
+		} else if (edma_read(ctlr, EDMA_QEMR)) {
 			dev_dbg(data, "QEMR %02x\n",
-				edma_read(EDMA_QEMR));
+				edma_read(ctlr, EDMA_QEMR));
 			for (i = 0; i < 8; i++) {
-				if (edma_read(EDMA_QEMR) & (1 << i)) {
+				if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) {
 					/* Clear the corresponding IPR bits */
-					edma_write(EDMA_QEMCR, 1 << i);
-					edma_shadow0_write(SH_QSECR, (1 << i));
+					edma_write(ctlr, EDMA_QEMCR, 1 << i);
+					edma_shadow0_write(ctlr, SH_QSECR,
+								(1 << i));
 
 					/* NOTE:  not reported!! */
 				}
 			}
-		} else if (edma_read(EDMA_CCERR)) {
+		} else if (edma_read(ctlr, EDMA_CCERR)) {
 			dev_dbg(data, "CCERR %08x\n",
-				edma_read(EDMA_CCERR));
+				edma_read(ctlr, EDMA_CCERR));
 			/* FIXME:  CCERR.BIT(16) ignored!  much better
 			 * to just write CCERRCLR with CCERR value...
 			 */
 			for (i = 0; i < 8; i++) {
-				if (edma_read(EDMA_CCERR) & (1 << i)) {
+				if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) {
 					/* Clear the corresponding IPR bits */
-					edma_write(EDMA_CCERRCLR, 1 << i);
+					edma_write(ctlr, EDMA_CCERRCLR, 1 << i);
 
 					/* NOTE:  not reported!! */
 				}
 			}
 		}
-		if ((edma_read_array(EDMA_EMR, 0) == 0)
-		    && (edma_read_array(EDMA_EMR, 1) == 0)
-		    && (edma_read(EDMA_QEMR) == 0)
-		    && (edma_read(EDMA_CCERR) == 0)) {
+		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
+		    && (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
+		    && (edma_read(ctlr, EDMA_QEMR) == 0)
+		    && (edma_read(ctlr, EDMA_CCERR) == 0)) {
 			break;
 		}
 		cnt++;
 		if (cnt > 10)
 			break;
 	}
-	edma_write(EDMA_EEVAL, 1);
+	edma_write(ctlr, EDMA_EEVAL, 1);
 	return IRQ_HANDLED;
 }
 
@@ -484,35 +547,53 @@  int edma_alloc_channel(int channel,
 		void *data,
 		enum dma_event_q eventq_no)
 {
+	unsigned i, done, ctlr = 0;
+
+	if (channel >= 0) {
+		ctlr = EDMA_CTLR(channel);
+		channel = EDMA_CHAN_SLOT(channel);
+	}
+
 	if (channel < 0) {
-		channel = 0;
-		for (;;) {
-			channel = find_next_bit(edma_noevent,
-					num_channels, channel);
-			if (channel == num_channels)
-				return -ENOMEM;
-			if (!test_and_set_bit(channel, edma_inuse))
+		for (i = 0; i < EDMA_MAX_CC; i++) {
+			channel = 0;
+			for (;;) {
+				channel = find_next_bit(edma_info[i]->
+						edma_noevent,
+						edma_info[i]->num_channels,
+						channel);
+				if (channel == edma_info[i]->num_channels)
+					return -ENOMEM;
+				if (!test_and_set_bit(channel,
+						edma_info[i]->edma_inuse)) {
+					done = 1;
+					ctlr = i;
+					break;
+				}
+				channel++;
+			}
+			if (done)
 				break;
-			channel++;
 		}
-	} else if (channel >= num_channels) {
+	} else if (channel >= edma_info[ctlr]->num_channels) {
 		return -EINVAL;
-	} else if (test_and_set_bit(channel, edma_inuse)) {
+	} else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
 		return -EBUSY;
 	}
 
 	/* ensure access through shadow region 0 */
-	edma_or_array2(EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
+	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
 
 	/* ensure no events are pending */
-	edma_stop(channel);
-	memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel),
+	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
+	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
 			&dummy_paramset, PARM_SIZE);
 
 	if (callback)
-		setup_dma_interrupt(channel, callback, data);
+		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
+					callback, data);
 
-	map_dmach_queue(channel, eventq_no);
+	map_dmach_queue(ctlr, channel, eventq_no);
 
 	return channel;
 }
@@ -532,15 +613,20 @@  EXPORT_SYMBOL(edma_alloc_channel);
  */
 void edma_free_channel(unsigned channel)
 {
-	if (channel >= num_channels)
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel >= edma_info[ctlr]->num_channels)
 		return;
 
 	setup_dma_interrupt(channel, NULL, NULL);
 	/* REVISIT should probably take out of shadow region 0 */
 
-	memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel),
+	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
 			&dummy_paramset, PARM_SIZE);
-	clear_bit(channel, edma_inuse);
+	clear_bit(channel, edma_info[ctlr]->edma_inuse);
 }
 EXPORT_SYMBOL(edma_free_channel);
 
@@ -558,28 +644,33 @@  EXPORT_SYMBOL(edma_free_channel);
  *
  * Returns the number of the slot, else negative errno.
  */
-int edma_alloc_slot(int slot)
+int edma_alloc_slot(unsigned ctlr, int slot)
 {
+	if (slot >= 0)
+		slot = EDMA_CHAN_SLOT(slot);
+
 	if (slot < 0) {
-		slot = num_channels;
+		slot = edma_info[ctlr]->num_channels;
 		for (;;) {
-			slot = find_next_zero_bit(edma_inuse,
-					num_slots, slot);
-			if (slot == num_slots)
+			slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
+					edma_info[ctlr]->num_slots, slot);
+			if (slot == edma_info[ctlr]->num_slots)
 				return -ENOMEM;
-			if (!test_and_set_bit(slot, edma_inuse))
+			if (!test_and_set_bit(slot,
+						edma_info[ctlr]->edma_inuse))
 				break;
 		}
-	} else if (slot < num_channels || slot >= num_slots) {
+	} else if (slot < edma_info[ctlr]->num_channels ||
+			slot >= edma_info[ctlr]->num_slots) {
 		return -EINVAL;
-	} else if (test_and_set_bit(slot, edma_inuse)) {
+	} else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
 		return -EBUSY;
 	}
 
-	memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot),
+	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
 			&dummy_paramset, PARM_SIZE);
 
-	return slot;
+	return EDMA_CTLR_CHAN(ctlr, slot);
 }
 EXPORT_SYMBOL(edma_alloc_slot);
 
@@ -593,12 +684,18 @@  EXPORT_SYMBOL(edma_alloc_slot);
  */
 void edma_free_slot(unsigned slot)
 {
-	if (slot < num_channels || slot >= num_slots)
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot < edma_info[ctlr]->num_channels ||
+		slot >= edma_info[ctlr]->num_slots)
 		return;
 
-	memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot),
+	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
 			&dummy_paramset, PARM_SIZE);
-	clear_bit(slot, edma_inuse);
+	clear_bit(slot, edma_info[ctlr]->edma_inuse);
 }
 EXPORT_SYMBOL(edma_free_slot);
 
@@ -620,8 +717,13 @@  EXPORT_SYMBOL(edma_free_slot);
 void edma_set_src(unsigned slot, dma_addr_t src_port,
 				enum address_mode mode, enum fifo_width width)
 {
-	if (slot < num_slots) {
-		unsigned int i = edma_parm_read(PARM_OPT, slot);
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot < edma_info[ctlr]->num_slots) {
+		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
 
 		if (mode) {
 			/* set SAM and program FWID */
@@ -630,11 +732,11 @@  void edma_set_src(unsigned slot, dma_addr_t src_port,
 			/* clear SAM */
 			i &= ~SAM;
 		}
-		edma_parm_write(PARM_OPT, slot, i);
+		edma_parm_write(ctlr, PARM_OPT, slot, i);
 
 		/* set the source port address
 		   in source register of param structure */
-		edma_parm_write(PARM_SRC, slot, src_port);
+		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
 	}
 }
 EXPORT_SYMBOL(edma_set_src);
@@ -653,8 +755,13 @@  EXPORT_SYMBOL(edma_set_src);
 void edma_set_dest(unsigned slot, dma_addr_t dest_port,
 				 enum address_mode mode, enum fifo_width width)
 {
-	if (slot < num_slots) {
-		unsigned int i = edma_parm_read(PARM_OPT, slot);
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot < edma_info[ctlr]->num_slots) {
+		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
 
 		if (mode) {
 			/* set DAM and program FWID */
@@ -663,10 +770,10 @@  void edma_set_dest(unsigned slot, dma_addr_t dest_port,
 			/* clear DAM */
 			i &= ~DAM;
 		}
-		edma_parm_write(PARM_OPT, slot, i);
+		edma_parm_write(ctlr, PARM_OPT, slot, i);
 		/* set the destination port address
 		   in dest register of param structure */
-		edma_parm_write(PARM_DST, slot, dest_port);
+		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
 	}
 }
 EXPORT_SYMBOL(edma_set_dest);
@@ -683,8 +790,12 @@  EXPORT_SYMBOL(edma_set_dest);
 void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
 {
 	struct edmacc_param temp;
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
 
-	edma_read_slot(slot, &temp);
+	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
 	if (src != NULL)
 		*src = temp.src;
 	if (dst != NULL)
@@ -704,10 +815,15 @@  EXPORT_SYMBOL(edma_get_position);
  */
 void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
 {
-	if (slot < num_slots) {
-		edma_parm_modify(PARM_SRC_DST_BIDX, slot,
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot < edma_info[ctlr]->num_slots) {
+		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
 				0xffff0000, src_bidx);
-		edma_parm_modify(PARM_SRC_DST_CIDX, slot,
+		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
 				0xffff0000, src_cidx);
 	}
 }
@@ -725,10 +841,15 @@  EXPORT_SYMBOL(edma_set_src_index);
  */
 void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
 {
-	if (slot < num_slots) {
-		edma_parm_modify(PARM_SRC_DST_BIDX, slot,
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot < edma_info[ctlr]->num_slots) {
+		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
 				0x0000ffff, dest_bidx << 16);
-		edma_parm_modify(PARM_SRC_DST_CIDX, slot,
+		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
 				0x0000ffff, dest_cidx << 16);
 	}
 }
@@ -767,16 +888,21 @@  void edma_set_transfer_params(unsigned slot,
 		u16 acnt, u16 bcnt, u16 ccnt,
 		u16 bcnt_rld, enum sync_dimension sync_mode)
 {
-	if (slot < num_slots) {
-		edma_parm_modify(PARM_LINK_BCNTRLD, slot,
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot < edma_info[ctlr]->num_slots) {
+		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
 				0x0000ffff, bcnt_rld << 16);
 		if (sync_mode == ASYNC)
-			edma_parm_and(PARM_OPT, slot, ~SYNCDIM);
+			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
 		else
-			edma_parm_or(PARM_OPT, slot, SYNCDIM);
+			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
 		/* Set the acount, bcount, ccount registers */
-		edma_parm_write(PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
-		edma_parm_write(PARM_CCNT, slot, ccnt);
+		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
+		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
 	}
 }
 EXPORT_SYMBOL(edma_set_transfer_params);
@@ -790,11 +916,19 @@  EXPORT_SYMBOL(edma_set_transfer_params);
  */
 void edma_link(unsigned from, unsigned to)
 {
-	if (from >= num_slots)
+	unsigned ctlr_from, ctlr_to;
+
+	ctlr_from = EDMA_CTLR(from);
+	from = EDMA_CHAN_SLOT(from);
+	ctlr_to = EDMA_CTLR(to);
+	to = EDMA_CHAN_SLOT(to);
+
+	if (from >= edma_info[ctlr_from]->num_slots)
 		return;
-	if (to >= num_slots)
+	if (to >= edma_info[ctlr_to]->num_slots)
 		return;
-	edma_parm_modify(PARM_LINK_BCNTRLD, from, 0xffff0000, PARM_OFFSET(to));
+	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
+				PARM_OFFSET(to));
 }
 EXPORT_SYMBOL(edma_link);
 
@@ -807,9 +941,14 @@  EXPORT_SYMBOL(edma_link);
  */
 void edma_unlink(unsigned from)
 {
-	if (from >= num_slots)
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(from);
+	from = EDMA_CHAN_SLOT(from);
+
+	if (from >= edma_info[ctlr]->num_slots)
 		return;
-	edma_parm_or(PARM_LINK_BCNTRLD, from, 0xffff);
+	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
 }
 EXPORT_SYMBOL(edma_unlink);
 
@@ -829,9 +968,15 @@  EXPORT_SYMBOL(edma_unlink);
  */
 void edma_write_slot(unsigned slot, const struct edmacc_param *param)
 {
-	if (slot >= num_slots)
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot >= edma_info[ctlr]->num_slots)
 		return;
-	memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), param, PARM_SIZE);
+	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
+			PARM_SIZE);
 }
 EXPORT_SYMBOL(edma_write_slot);
 
@@ -845,9 +990,15 @@  EXPORT_SYMBOL(edma_write_slot);
  */
 void edma_read_slot(unsigned slot, struct edmacc_param *param)
 {
-	if (slot >= num_slots)
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(slot);
+	slot = EDMA_CHAN_SLOT(slot);
+
+	if (slot >= edma_info[ctlr]->num_slots)
 		return;
-	memcpy_fromio(param, edmacc_regs_base + PARM_OFFSET(slot), PARM_SIZE);
+	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
+			PARM_SIZE);
 }
 EXPORT_SYMBOL(edma_read_slot);
 
@@ -864,10 +1015,15 @@  EXPORT_SYMBOL(edma_read_slot);
  */
 void edma_pause(unsigned channel)
 {
-	if (channel < num_channels) {
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_info[ctlr]->num_channels) {
 		unsigned int mask = (1 << (channel & 0x1f));
 
-		edma_shadow0_write_array(SH_EECR, channel >> 5, mask);
+		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
 	}
 }
 EXPORT_SYMBOL(edma_pause);
@@ -880,10 +1036,15 @@  EXPORT_SYMBOL(edma_pause);
  */
 void edma_resume(unsigned channel)
 {
-	if (channel < num_channels) {
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_info[ctlr]->num_channels) {
 		unsigned int mask = (1 << (channel & 0x1f));
 
-		edma_shadow0_write_array(SH_EESR, channel >> 5, mask);
+		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
 	}
 }
 EXPORT_SYMBOL(edma_resume);
@@ -901,28 +1062,33 @@  EXPORT_SYMBOL(edma_resume);
  */
 int edma_start(unsigned channel)
 {
-	if (channel < num_channels) {
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_info[ctlr]->num_channels) {
 		int j = channel >> 5;
 		unsigned int mask = (1 << (channel & 0x1f));
 
 		/* EDMA channels without event association */
-		if (test_bit(channel, edma_noevent)) {
+		if (test_bit(channel, edma_info[ctlr]->edma_noevent)) {
 			pr_debug("EDMA: ESR%d %08x\n", j,
-				edma_shadow0_read_array(SH_ESR, j));
-			edma_shadow0_write_array(SH_ESR, j, mask);
+				edma_shadow0_read_array(ctlr, SH_ESR, j));
+			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
 			return 0;
 		}
 
 		/* EDMA channel with event association */
 		pr_debug("EDMA: ER%d %08x\n", j,
-			edma_shadow0_read_array(SH_ER, j));
+			edma_shadow0_read_array(ctlr, SH_ER, j));
 		/* Clear any pending error */
-		edma_write_array(EDMA_EMCR, j, mask);
+		edma_write_array(ctlr, EDMA_EMCR, j, mask);
 		/* Clear any SER */
-		edma_shadow0_write_array(SH_SECR, j, mask);
-		edma_shadow0_write_array(SH_EESR, j, mask);
+		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
 		pr_debug("EDMA: EER%d %08x\n", j,
-			edma_shadow0_read_array(SH_EER, j));
+			edma_shadow0_read_array(ctlr, SH_EER, j));
 		return 0;
 	}
 
@@ -941,17 +1107,22 @@  EXPORT_SYMBOL(edma_start);
  */
 void edma_stop(unsigned channel)
 {
-	if (channel < num_channels) {
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_info[ctlr]->num_channels) {
 		int j = channel >> 5;
 		unsigned int mask = (1 << (channel & 0x1f));
 
-		edma_shadow0_write_array(SH_EECR, j, mask);
-		edma_shadow0_write_array(SH_ECR, j, mask);
-		edma_shadow0_write_array(SH_SECR, j, mask);
-		edma_write_array(EDMA_EMCR, j, mask);
+		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
+		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
+		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+		edma_write_array(ctlr, EDMA_EMCR, j, mask);
 
 		pr_debug("EDMA: EER%d %08x\n", j,
-				edma_shadow0_read_array(SH_EER, j));
+				edma_shadow0_read_array(ctlr, SH_EER, j));
 
 		/* REVISIT:  consider guarding against inappropriate event
 		 * chaining by overwriting with dummy_paramset.
@@ -975,18 +1146,23 @@  EXPORT_SYMBOL(edma_stop);
 
 void edma_clean_channel(unsigned channel)
 {
-	if (channel < num_channels) {
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel < edma_info[ctlr]->num_channels) {
 		int j = (channel >> 5);
 		unsigned int mask = 1 << (channel & 0x1f);
 
 		pr_debug("EDMA: EMR%d %08x\n", j,
-				edma_read_array(EDMA_EMR, j));
-		edma_shadow0_write_array(SH_ECR, j, mask);
+				edma_read_array(ctlr, EDMA_EMR, j));
+		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
 		/* Clear the corresponding EMR bits */
-		edma_write_array(EDMA_EMCR, j, mask);
+		edma_write_array(ctlr, EDMA_EMCR, j, mask);
 		/* Clear any SER */
-		edma_shadow0_write_array(SH_SECR, j, mask);
-		edma_write(EDMA_CCERRCLR, (1 << 16) | 0x3);
+		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
+		edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3);
 	}
 }
 EXPORT_SYMBOL(edma_clean_channel);
@@ -998,12 +1174,17 @@  EXPORT_SYMBOL(edma_clean_channel);
  */
 void edma_clear_event(unsigned channel)
 {
-	if (channel >= num_channels)
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel >= edma_info[ctlr]->num_channels)
 		return;
 	if (channel < 32)
-		edma_write(EDMA_ECR, 1 << channel);
+		edma_write(ctlr, EDMA_ECR, 1 << channel);
 	else
-		edma_write(EDMA_ECRH, 1 << (channel - 32));
+		edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32));
 }
 EXPORT_SYMBOL(edma_clear_event);
 
@@ -1012,62 +1193,129 @@  EXPORT_SYMBOL(edma_clear_event);
 static int __init edma_probe(struct platform_device *pdev)
 {
 	struct edma_soc_info	*info = pdev->dev.platform_data;
-	int			i;
-	int			status;
+	const s8		(*queue_priority_mapping)[2];
+	const s8		(*queue_tc_mapping)[2];
+	int			i, j, found = 0;
+	int			status = -1;
 	const s8		*noevent;
-	int			irq = 0, err_irq = 0;
-	struct resource		*r;
-	resource_size_t		len;
+	int			irq[EDMA_MAX_CC] = {0, 0};
+	int			err_irq[EDMA_MAX_CC] = {0, 0};
+	struct resource		*r[EDMA_MAX_CC] = {NULL};
+	resource_size_t		len[EDMA_MAX_CC];
+	char			res_name[10];
+	char			irq_name[10];
 
 	if (!info)
 		return -ENODEV;
 
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma_cc");
-	if (!r)
-		return -ENODEV;
+	for (j = 0; j < EDMA_MAX_CC; j++) {
+		sprintf(res_name, "edma_cc%d", j);
+		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						res_name);
+		if (!r[j]) {
+			if (found)
+				break;
+			else
+				return -ENODEV;
+		} else
+			found = 1;
+
+		len[j] = resource_size(r[j]);
+
+		r[j] = request_mem_region(r[j]->start, len[j],
+			dev_name(&pdev->dev));
+		if (!r[j]) {
+			status = -EBUSY;
+			goto fail1;
+		}
 
-	len = r->end - r->start + 1;
+		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
+		if (!edmacc_regs_base[j]) {
+			status = -EBUSY;
+			goto fail1;
+		}
 
-	r = request_mem_region(r->start, len, r->name);
-	if (!r)
-		return -EBUSY;
+		edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
+		if (!edma_info[j]) {
+			status = -ENOMEM;
+			goto fail1;
+		}
+		memset(edma_info[j], 0, sizeof(struct edma));
+
+		edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
+							EDMA_MAX_DMACH);
+		edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
+							EDMA_MAX_PARAMENTRY);
+		edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
+							EDMA_MAX_CC);
+
+		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
+			edmacc_regs_base[j]);
+
+		for (i = 0; i < edma_info[j]->num_slots; i++)
+			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
+					&dummy_paramset, PARM_SIZE);
+
+		noevent = info[j].noevent;
+		if (noevent) {
+			while (*noevent != -1)
+				set_bit(*noevent++, edma_info[j]->edma_noevent);
+		}
 
-	edmacc_regs_base = ioremap(r->start, len);
-	if (!edmacc_regs_base) {
-		status = -EBUSY;
-		goto fail1;
-	}
+		sprintf(irq_name, "edma%d", j);
+		irq[j] = platform_get_irq_byname(pdev, irq_name);
+		edma_info[j]->irq_res_start = irq[j];
+		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
+					&pdev->dev);
+		if (status < 0) {
+			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
+				irq[j], status);
+			goto fail;
+		}
 
-	num_channels = min_t(unsigned, info->n_channel, EDMA_MAX_DMACH);
-	num_slots = min_t(unsigned, info->n_slot, EDMA_MAX_PARAMENTRY);
+		sprintf(irq_name, "edma%d_err", j);
+		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
+		edma_info[j]->irq_res_end = err_irq[j];
+		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
+					"edma_error", &pdev->dev);
+		if (status < 0) {
+			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
+				err_irq[j], status);
+			goto fail;
+		}
 
-	dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", edmacc_regs_base);
+		/* Everything lives on transfer controller 1 until otherwise
+		 * specified. This way, long transfers on the low priority queue
+		 * started by the codec engine will not cause audio defects.
+		 */
+		for (i = 0; i < edma_info[j]->num_channels; i++)
+			map_dmach_queue(j, i, EVENTQ_1);
 
-	for (i = 0; i < num_slots; i++)
-		memcpy_toio(edmacc_regs_base + PARM_OFFSET(i),
-				&dummy_paramset, PARM_SIZE);
+		queue_tc_mapping = info[j].queue_tc_mapping;
+		queue_priority_mapping = info[j].queue_priority_mapping;
 
-	noevent = info->noevent;
-	if (noevent) {
-		while (*noevent != -1)
-			set_bit(*noevent++, edma_noevent);
-	}
+		/* Event queue to TC mapping */
+		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
+			map_queue_tc(j, queue_tc_mapping[i][0],
+					queue_tc_mapping[i][1]);
 
-	irq = platform_get_irq(pdev, 0);
-	status = request_irq(irq, dma_irq_handler, 0, "edma", &pdev->dev);
-	if (status < 0) {
-		dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
-			irq, status);
-		goto fail;
-	}
+		/* Event queue priority mapping */
+		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+			assign_priority_to_queue(j,
+						queue_priority_mapping[i][0],
+						queue_priority_mapping[i][1]);
+
+		/* Map the channel to param entry if channel mapping logic
+		 * exist
+		 */
+		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
+			map_dmach_param(j);
 
-	err_irq = platform_get_irq(pdev, 1);
-	status = request_irq(err_irq, dma_ccerr_handler, 0,
-				"edma_error", &pdev->dev);
-	if (status < 0) {
-		dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
-			err_irq, status);
-		goto fail;
+		for (i = 0; i < info[j].n_region; i++) {
+			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
+			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
+			edma_write_array(j, EDMA_QRAE, i, 0x0);
+		}
 	}
 
 	if (tc_errs_handled) {
@@ -1087,38 +1335,23 @@  static int __init edma_probe(struct platform_device *pdev)
 		}
 	}
 
-	/* Everything lives on transfer controller 1 until otherwise specified.
-	 * This way, long transfers on the low priority queue
-	 * started by the codec engine will not cause audio defects.
-	 */
-	for (i = 0; i < num_channels; i++)
-		map_dmach_queue(i, EVENTQ_1);
-
-	/* Event queue to TC mapping */
-	for (i = 0; queue_tc_mapping[i][0] != -1; i++)
-		map_queue_tc(queue_tc_mapping[i][0], queue_tc_mapping[i][1]);
-
-	/* Event queue priority mapping */
-	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
-		assign_priority_to_queue(queue_priority_mapping[i][0],
-					 queue_priority_mapping[i][1]);
-
-	for (i = 0; i < info->n_region; i++) {
-		edma_write_array2(EDMA_DRAE, i, 0, 0x0);
-		edma_write_array2(EDMA_DRAE, i, 1, 0x0);
-		edma_write_array(EDMA_QRAE, i, 0x0);
-	}
-
 	return 0;
 
 fail:
-	if (err_irq)
-		free_irq(err_irq, NULL);
-	if (irq)
-		free_irq(irq, NULL);
-	iounmap(edmacc_regs_base);
+	for (i = 0; i < EDMA_MAX_CC; i++) {
+		if (err_irq[i])
+			free_irq(err_irq[i], &pdev->dev);
+		if (irq[i])
+			free_irq(irq[i], &pdev->dev);
+	}
 fail1:
-	release_mem_region(r->start, len);
+	for (i = 0; i < EDMA_MAX_CC; i++) {
+		if (r[i])
+			release_mem_region(r[i]->start, len[i]);
+		if (edmacc_regs_base[i])
+			iounmap(edmacc_regs_base[i]);
+		kfree(edma_info[i]);
+	}
 	return status;
 }
 
diff --git a/arch/arm/mach-davinci/include/mach/edma.h b/arch/arm/mach-davinci/include/mach/edma.h
index 24a3792..ba2ebdd 100644
--- a/arch/arm/mach-davinci/include/mach/edma.h
+++ b/arch/arm/mach-davinci/include/mach/edma.h
@@ -170,6 +170,10 @@  enum sync_dimension {
 	ABSYNC = 1
 };
 
+#define EDMA_CTLR_CHAN(ctlr, chan)	(((ctlr) << 16) | (chan))
+#define EDMA_CTLR(i)			((i) >> 16)
+#define EDMA_CHAN_SLOT(i)		((i) & 0xffff)
+
 #define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
 #define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
 
@@ -180,7 +184,7 @@  int edma_alloc_channel(int channel,
 void edma_free_channel(unsigned channel);
 
 /* alloc/free parameter RAM slots */
-int edma_alloc_slot(int slot);
+int edma_alloc_slot(unsigned ctlr, int slot);
 void edma_free_slot(unsigned slot);
 
 /* calls that operate on part of a parameter RAM slot */
@@ -216,9 +220,12 @@  struct edma_soc_info {
 	unsigned	n_region;
 	unsigned	n_slot;
 	unsigned	n_tc;
+	unsigned	n_cc;
 
 	/* list of channels with no even trigger; terminated by "-1" */
 	const s8	*noevent;
+	const s8	(*queue_tc_mapping)[2];
+	const s8	(*queue_priority_mapping)[2];
 };
 
 #endif
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 58fd1cb..832d5db 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -175,8 +175,8 @@  static struct resource evm_snd_resources[] = {
 };
 
 static struct evm_snd_platform_data evm_snd_data = {
-	.tx_dma_ch	= DAVINCI_DMA_ASP0_TX,
-	.rx_dma_ch	= DAVINCI_DMA_ASP0_RX,
+	.tx_dma_ch	= EDMA_CTLR_CHAN(0, DAVINCI_DMA_ASP0_TX),
+	.rx_dma_ch	= EDMA_CTLR_CHAN(0, DAVINCI_DMA_ASP0_RX),
 };
 
 /* DM335 EVM uses ASP1; line-out is a stereo mini-jack */
@@ -189,8 +189,8 @@  static struct resource dm335evm_snd_resources[] = {
 };
 
 static struct evm_snd_platform_data dm335evm_snd_data = {
-	.tx_dma_ch	= DAVINCI_DMA_ASP1_TX,
-	.rx_dma_ch	= DAVINCI_DMA_ASP1_RX,
+	.tx_dma_ch	= EDMA_CTLR_CHAN(0, DAVINCI_DMA_ASP1_TX),
+	.rx_dma_ch	= EDMA_CTLR_CHAN(0, DAVINCI_DMA_ASP1_RX),
 };
 
 static struct platform_device *evm_snd_device;
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index a059965..3ee38b6 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -143,7 +143,7 @@  static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
 	prtd->master_lch = ret;
 
 	/* Request parameter RAM reload slot */
-	ret = edma_alloc_slot(EDMA_SLOT_ANY);
+	ret = edma_alloc_slot(EDMA_CTLR(prtd->master_lch), EDMA_SLOT_ANY);
 	if (ret < 0) {
 		edma_free_channel(prtd->master_lch);
 		return ret;
@@ -160,8 +160,8 @@  static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
 	 * so davinci_pcm_enqueue_dma() takes less time in IRQ.
 	 */
 	edma_read_slot(prtd->slave_lch, &p_ram);
-	p_ram.opt |= TCINTEN | EDMA_TCC(prtd->master_lch);
-	p_ram.link_bcntrld = prtd->slave_lch << 5;
+	p_ram.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(prtd->master_lch));
+	p_ram.link_bcntrld = EDMA_CHAN_SLOT(prtd->slave_lch) << 5;
 	edma_write_slot(prtd->slave_lch, &p_ram);
 
 	return 0;