[RFC,v9,2/6] dma: mpc512x: add support for peripheral transfers

Message ID 1394624875-24411-3-git-send-email-a13xp0p0v88@gmail.com (mailing list archive)
State Accepted
Delegated to: Vinod Koul

Commit Message

Alexander Popov March 12, 2014, 11:47 a.m. UTC
Introduce support for slave s/g transfer preparation and the associated
device control callback in the MPC512x DMA controller driver, which adds
support for data transfers between memory and peripheral I/O to the
previously supported mem-to-mem transfers.

Signed-off-by: Alexander Popov <a13xp0p0v88@gmail.com>
---
 drivers/dma/mpc512x_dma.c | 236 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 231 insertions(+), 5 deletions(-)
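
For context, a client driver would exercise this new capability through the
standard dmaengine API. A minimal sketch, assuming an illustrative channel
"chan", scatterlist "sgl" and FIFO address "fifo_paddr" (none of these names
come from the patch), honouring the driver's constraints:

	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_paddr,	/* must be 4-byte aligned */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,	/* sg length must align on 4 * maxburst */
	};
	struct dma_async_tx_descriptor *desc;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	/* sg_len must be 1: this driver refuses chunked transfers */
	desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);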

Comments

Andy Shevchenko March 14, 2014, 9:47 a.m. UTC | #1
On Wed, 2014-03-12 at 15:47 +0400, Alexander Popov wrote:
> Introduce support for slave s/g transfer preparation and the associated
> device control callback in the MPC512x DMA controller driver, which adds
> support for data transfers between memory and peripheral I/O to the
> previously supported mem-to-mem transfers.


> --- a/drivers/dma/mpc512x_dma.c
> +++ b/drivers/dma/mpc512x_dma.c
> @@ -2,6 +2,7 @@
>   * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
>   * Copyright (C) Semihalf 2009
>   * Copyright (C) Ilya Yanok, Emcraft Systems 2010
> + * Copyright (C) Alexander Popov, Promcontroller 2013

2014?

[]

> +static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +							unsigned long arg)
> +{
> +	struct mpc_dma_chan *mchan;
> +	struct mpc_dma *mdma;
> +	struct dma_slave_config *cfg;
> +	unsigned long flags;
> +
> +	mchan = dma_chan_to_mpc_dma_chan(chan);
> +	switch (cmd) {
> +	case DMA_TERMINATE_ALL:
> +		/* Disable channel requests */
> +		mdma = dma_chan_to_mpc_dma(chan);
> +
> +		spin_lock_irqsave(&mchan->lock, flags);
> +
> +		out_8(&mdma->regs->dmacerq, chan->chan_id);
> +		list_splice_tail_init(&mchan->prepared, &mchan->free);
> +		list_splice_tail_init(&mchan->queued, &mchan->free);
> +		list_splice_tail_init(&mchan->active, &mchan->free);
> +
> +		spin_unlock_irqrestore(&mchan->lock, flags);
> +
> +		return 0;
> +	case DMA_SLAVE_CONFIG:
> +		/* Constraints:
> +		 *  - only transfers between a peripheral device and
> +		 *     memory are supported;
> +		 *  - minimal transfer chunk is 4 bytes and consequently
> +		 *     source and destination addresses must be 4-byte aligned
> +		 *     and transfer size must be aligned on (4 * maxburst)
> +		 *     boundary;
> +		 *  - during the transfer RAM address is being incremented by
> +		 *     the size of minimal transfer chunk;
> +		 *  - peripheral port's address is constant during the transfer.
> +		 */
> +
> +		cfg = (void *)arg;
> +
> +		if (!is_slave_direction(cfg->direction))
> +			return -EINVAL;

As far as I understand the intention, you are not supposed to use the
direction field in dma_slave_config. It will be removed at some point.

> +
> +		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES &&
> +			cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
> +			return -EINVAL;
> +
> +		spin_lock_irqsave(&mchan->lock, flags);
> +
> +		if (cfg->direction == DMA_DEV_TO_MEM) {
> +			mchan->per_paddr = cfg->src_addr;
> +			mchan->tcd_nunits = cfg->src_maxburst;
> +		} else {
> +			mchan->per_paddr = cfg->dst_addr;
> +			mchan->tcd_nunits = cfg->dst_maxburst;
> +		}

Ditto.
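
For reference, dma_slave_config as it looked around this kernel version
(abridged): both the source and destination sides are always present, which
is why a driver can capture both at config time and leave the direction
field alone.

	struct dma_slave_config {
		enum dma_transfer_direction direction;	/* to be removed */
		dma_addr_t src_addr;
		dma_addr_t dst_addr;
		enum dma_slave_buswidth src_addr_width;
		enum dma_slave_buswidth dst_addr_width;
		u32 src_maxburst;
		u32 dst_maxburst;
		bool device_fc;
		unsigned int slave_id;
	};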
Alexander Popov March 19, 2014, 1:26 p.m. UTC | #2
Hello Andy

2014-03-14 13:47 GMT+04:00 Andy Shevchenko <andriy.shevchenko@linux.intel.com>:
> On Wed, 2014-03-12 at 15:47 +0400, Alexander Popov wrote:
>> +     case DMA_SLAVE_CONFIG:
>> +             /* Constraints:
>> +              *  - only transfers between a peripheral device and
>> +              *     memory are supported;
>> +              *  - minimal transfer chunk is 4 bytes and consequently
>> +              *     source and destination addresses must be 4-byte aligned
>> +              *     and transfer size must be aligned on (4 * maxburst)
>> +              *     boundary;
>> +              *  - during the transfer RAM address is being incremented by
>> +              *     the size of minimal transfer chunk;
>> +              *  - peripheral port's address is constant during the transfer.
>> +              */
>> +
>> +             cfg = (void *)arg;
>> +
>> +             if (!is_slave_direction(cfg->direction))
>> +                     return -EINVAL;
>
> As far as I understand the intention, you are not supposed to use the
> direction field in dma_slave_config. It will be removed at some point.
>
>> +
>> +             if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES &&
>> +                     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
>> +                     return -EINVAL;
>> +
>> +             spin_lock_irqsave(&mchan->lock, flags);
>> +
>> +             if (cfg->direction == DMA_DEV_TO_MEM) {
>> +                     mchan->per_paddr = cfg->src_addr;
>> +                     mchan->tcd_nunits = cfg->src_maxburst;
>> +             } else {
>> +                     mchan->per_paddr = cfg->dst_addr;
>> +                     mchan->tcd_nunits = cfg->dst_maxburst;
>> +             }
>
> Ditto.

Excuse me, I don't understand this point.
I have to use cfg->direction because in the case of DMA_DEV_TO_MEM
I use cfg->SRC_addr and cfg->SRC_maxburst, and in the case of
DMA_MEM_TO_DEV I use cfg->DST_addr and cfg->DST_maxburst.
Is that correct?

Best regards,
Alexander
Vinod Koul March 19, 2014, 2:08 p.m. UTC | #3
On Wed, Mar 19, 2014 at 05:26:47PM +0400, Alexander Popov wrote:
> Hello Andy
> 
> 2014-03-14 13:47 GMT+04:00 Andy Shevchenko <andriy.shevchenko@linux.intel.com>:
> > On Wed, 2014-03-12 at 15:47 +0400, Alexander Popov wrote:
> >> +     case DMA_SLAVE_CONFIG:
> >> +             /* Constraints:
> >> +              *  - only transfers between a peripheral device and
> >> +              *     memory are supported;
> >> +              *  - minimal transfer chunk is 4 bytes and consequently
> >> +              *     source and destination addresses must be 4-byte aligned
> >> +              *     and transfer size must be aligned on (4 * maxburst)
> >> +              *     boundary;
> >> +              *  - during the transfer RAM address is being incremented by
> >> +              *     the size of minimal transfer chunk;
> >> +              *  - peripheral port's address is constant during the transfer.
> >> +              */
> >> +
> >> +             cfg = (void *)arg;
> >> +
> >> +             if (!is_slave_direction(cfg->direction))
> >> +                     return -EINVAL;
> >
> > As far as I understand the intention, you are not supposed to use the
> > direction field in dma_slave_config. It will be removed at some point.
> >
> >> +
> >> +             if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES &&
> >> +                     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
> >> +                     return -EINVAL;
> >> +
> >> +             spin_lock_irqsave(&mchan->lock, flags);
> >> +
> >> +             if (cfg->direction == DMA_DEV_TO_MEM) {
> >> +                     mchan->per_paddr = cfg->src_addr;
> >> +                     mchan->tcd_nunits = cfg->src_maxburst;
> >> +             } else {
> >> +                     mchan->per_paddr = cfg->dst_addr;
> >> +                     mchan->tcd_nunits = cfg->dst_maxburst;
> >> +             }
> >
> > Ditto.
> 
> Excuse me, I don't understand this point.
> I have to use cfg->direction because in the case of DMA_DEV_TO_MEM
> I use cfg->SRC_addr and cfg->SRC_maxburst, and in the case of
> DMA_MEM_TO_DEV I use cfg->DST_addr and cfg->DST_maxburst.
> Is that correct?
You store the complete config for both source and destination. Then,
based on the descriptor direction, you can retrieve the values from the
channel context and program them.

This way you _don't_ need to fix the direction and can use it both ways!
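
A minimal sketch of that approach, with illustrative src_*/dst_* field names
on the channel structure: capture both sides at DMA_SLAVE_CONFIG time without
consulting cfg->direction,

	case DMA_SLAVE_CONFIG:
		cfg = (void *)arg;

		spin_lock_irqsave(&mchan->lock, flags);

		/* Store both sides; cfg->direction is not consulted */
		mchan->src_per_paddr = cfg->src_addr;
		mchan->src_tcd_nunits = cfg->src_maxburst;
		mchan->dst_per_paddr = cfg->dst_addr;
		mchan->dst_tcd_nunits = cfg->dst_maxburst;

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

and then pick the matching side in mpc_dma_prep_slave_sg() from its own
direction argument:

	if (direction == DMA_DEV_TO_MEM) {
		per_paddr = mchan->src_per_paddr;
		tcd_nunits = mchan->src_tcd_nunits;
	} else {
		per_paddr = mchan->dst_per_paddr;
		tcd_nunits = mchan->dst_tcd_nunits;
	}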

Patch

diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2ce248b..b1e430c 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -2,6 +2,7 @@ 
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2013
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -29,8 +30,17 @@ 
  */
 
 /*
- * This is initial version of MPC5121 DMA driver. Only memory to memory
- * transfers are supported (tested using dmatest module).
+ * MPC512x and MPC8308 DMA driver. It supports
+ * memory to memory data transfers (tested using dmatest module) and
+ * data transfers between memory and peripheral I/O memory
+ * by means of slave s/g with these limitations:
+ *  - chunked transfers (transfers with more than one part) are refused
+ *     as long as proper support for scatter/gather is missing;
+ *  - transfers on MPC8308 always start from software as this SoC appears
+ *     not to have external request lines for peripheral flow control;
+ *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
+ *     source and destination addresses must be 4-byte aligned
+ *     and transfer size must be aligned on (4 * maxburst) boundary;
  */
 
 #include <linux/module.h>
@@ -189,6 +199,7 @@  struct mpc_dma_desc {
 	dma_addr_t			tcd_paddr;
 	int				error;
 	struct list_head		node;
+	int				will_access_peripheral;
 };
 
 struct mpc_dma_chan {
@@ -201,6 +212,10 @@  struct mpc_dma_chan {
 	struct mpc_dma_tcd		*tcd;
 	dma_addr_t			tcd_paddr;
 
+	/* Settings for access to peripheral FIFO */
+	dma_addr_t			per_paddr;	/* FIFO address */
+	u32				tcd_nunits;
+
 	/* Lock for this structure */
 	spinlock_t			lock;
 };
@@ -251,8 +266,23 @@  static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 	struct mpc_dma_desc *mdesc;
 	int cid = mchan->chan.chan_id;
 
-	/* Move all queued descriptors to active list */
-	list_splice_tail_init(&mchan->queued, &mchan->active);
+	while (!list_empty(&mchan->queued)) {
+		mdesc = list_first_entry(&mchan->queued,
+						struct mpc_dma_desc, node);
+		/*
+		 * Grab either several mem-to-mem transfer descriptors
+		 * or one peripheral transfer descriptor,
+		 * don't mix mem-to-mem and peripheral transfer descriptors
+		 * within the same 'active' list.
+		 */
+		if (mdesc->will_access_peripheral) {
+			if (list_empty(&mchan->active))
+				list_move_tail(&mdesc->node, &mchan->active);
+			break;
+		} else {
+			list_move_tail(&mdesc->node, &mchan->active);
+		}
+	}
 
 	/* Chain descriptors into one transaction */
 	list_for_each_entry(mdesc, &mchan->active, node) {
@@ -278,7 +308,17 @@  static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 
 	if (first != prev)
 		mdma->tcd[cid].e_sg = 1;
-	out_8(&mdma->regs->dmassrt, cid);
+
+	if (mdma->is_mpc8308) {
+		/* MPC8308, no request lines, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	} else if (first->will_access_peripheral) {
+		/* Peripherals involved, start by external request signal */
+		out_8(&mdma->regs->dmaserq, cid);
+	} else {
+		/* Memory to memory transfer, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	}
 }
 
 /* Handle interrupt on one half of DMA controller (32 channels) */
@@ -596,6 +636,7 @@  mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	}
 
 	mdesc->error = 0;
+	mdesc->will_access_peripheral = 0;
 	tcd = mdesc->tcd;
 
 	/* Prepare Transfer Control Descriptor for this transaction */
@@ -643,6 +684,188 @@  mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	return &mdesc->desc;
 }
 
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma_desc *mdesc = NULL;
+	dma_addr_t per_paddr;
+	u32 tcd_nunits;
+	struct mpc_dma_tcd *tcd;
+	unsigned long iflags;
+	struct scatterlist *sg;
+	size_t len;
+	int iter, i;
+
+	/* Currently there is no proper support for scatter/gather */
+	if (sg_len != 1)
+		return NULL;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		spin_lock_irqsave(&mchan->lock, iflags);
+
+		mdesc = list_first_entry(&mchan->free,
+						struct mpc_dma_desc, node);
+		if (!mdesc) {
+			spin_unlock_irqrestore(&mchan->lock, iflags);
+			/* Try to free completed descriptors */
+			mpc_dma_process_completed(mdma);
+			return NULL;
+		}
+
+		list_del(&mdesc->node);
+
+		per_paddr = mchan->per_paddr;
+		tcd_nunits = mchan->tcd_nunits;
+
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+
+		if (per_paddr == 0 || tcd_nunits == 0)
+			goto err_prep;
+
+		mdesc->error = 0;
+		mdesc->will_access_peripheral = 1;
+
+		/* Prepare Transfer Control Descriptor for this transaction */
+		tcd = mdesc->tcd;
+
+		memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+		if (!IS_ALIGNED(sg_dma_address(sg), 4))
+			goto err_prep;
+
+		if (direction == DMA_DEV_TO_MEM) {
+			tcd->saddr = per_paddr;
+			tcd->daddr = sg_dma_address(sg);
+			tcd->soff = 0;
+			tcd->doff = 4;
+		} else {
+			tcd->saddr = sg_dma_address(sg);
+			tcd->daddr = per_paddr;
+			tcd->soff = 4;
+			tcd->doff = 0;
+		}
+
+		tcd->ssize = MPC_DMA_TSIZE_4;
+		tcd->dsize = MPC_DMA_TSIZE_4;
+
+		len = sg_dma_len(sg);
+		tcd->nbytes = tcd_nunits * 4;
+		if (!IS_ALIGNED(len, tcd->nbytes))
+			goto err_prep;
+
+		iter = len / tcd->nbytes;
+		if (iter >= 1 << 15) {
+			/* len is too big */
+			goto err_prep;
+		}
+		/* citer_linkch contains the high bits of iter */
+		tcd->biter = iter & 0x1ff;
+		tcd->biter_linkch = iter >> 9;
+		tcd->citer = tcd->biter;
+		tcd->citer_linkch = tcd->biter_linkch;
+
+		tcd->e_sg = 0;
+		tcd->d_req = 1;
+
+		/* Place descriptor in prepared list */
+		spin_lock_irqsave(&mchan->lock, iflags);
+		list_add_tail(&mdesc->node, &mchan->prepared);
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+	}
+
+	return &mdesc->desc;
+
+err_prep:
+	/* Put the descriptor back */
+	spin_lock_irqsave(&mchan->lock, iflags);
+	list_add_tail(&mdesc->node, &mchan->free);
+	spin_unlock_irqrestore(&mchan->lock, iflags);
+
+	return NULL;
+}
+
+static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+							unsigned long arg)
+{
+	struct mpc_dma_chan *mchan;
+	struct mpc_dma *mdma;
+	struct dma_slave_config *cfg;
+	unsigned long flags;
+
+	mchan = dma_chan_to_mpc_dma_chan(chan);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		/* Disable channel requests */
+		mdma = dma_chan_to_mpc_dma(chan);
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		out_8(&mdma->regs->dmacerq, chan->chan_id);
+		list_splice_tail_init(&mchan->prepared, &mchan->free);
+		list_splice_tail_init(&mchan->queued, &mchan->free);
+		list_splice_tail_init(&mchan->active, &mchan->free);
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+	case DMA_SLAVE_CONFIG:
+		/* Constraints:
+		 *  - only transfers between a peripheral device and
+		 *     memory are supported;
+		 *  - minimal transfer chunk is 4 bytes and consequently
+		 *     source and destination addresses must be 4-byte aligned
+		 *     and transfer size must be aligned on (4 * maxburst)
+		 *     boundary;
+		 *  - during the transfer RAM address is being incremented by
+		 *     the size of minimal transfer chunk;
+		 *  - peripheral port's address is constant during the transfer.
+		 */
+
+		cfg = (void *)arg;
+
+		if (!is_slave_direction(cfg->direction))
+			return -EINVAL;
+
+		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES &&
+			cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+			return -EINVAL;
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		if (cfg->direction == DMA_DEV_TO_MEM) {
+			mchan->per_paddr = cfg->src_addr;
+			mchan->tcd_nunits = cfg->src_maxburst;
+		} else {
+			mchan->per_paddr = cfg->dst_addr;
+			mchan->tcd_nunits = cfg->dst_maxburst;
+		}
+
+		if (!IS_ALIGNED(mchan->per_paddr, 4)) {
+			spin_unlock_irqrestore(&mchan->lock, flags);
+			return -EINVAL;
+		}
+
+		if (mchan->tcd_nunits == 0)
+			mchan->tcd_nunits = 1;	/* Apply default */
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+	default:
+		/* Unknown command */
+		break;
+	}
+
+	return -ENXIO;
+}
+
 static int mpc_dma_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
@@ -727,9 +950,12 @@  static int mpc_dma_probe(struct platform_device *op)
 	dma->device_issue_pending = mpc_dma_issue_pending;
 	dma->device_tx_status = mpc_dma_tx_status;
 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
+	dma->device_control = mpc_dma_device_control;
 
 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma->cap_mask);
 
 	for (i = 0; i < dma->chancnt; i++) {
 		mchan = &mdma->channels[i];