
[v2,2/2] dmaengine: ti: edma: Enable support for polled (memcpy) completion

Message ID 20190521075945.14085-3-peter.ujfalusi@ti.com (mailing list archive)
State New, archived
Series dmaengine: ti: edma: Polled completion support

Commit Message

Peter Ujfalusi May 21, 2019, 7:59 a.m. UTC
When a DMA client driver does not provide a callback for the completion of
a transfer (and/or does not set DMA_PREP_INTERRUPT) but instead polls the
status of the transfer (for a short memcpy, for example), we never get a
completion interrupt and thus never mark the transaction as done.

Check the event registers (ER and EER) and, if the channel is inactive,
return DMA_COMPLETE to let the client know that the transfer has
completed.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
---
 drivers/dma/ti/edma.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

Patch

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index a5822925a327..0f4873c2aa12 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1226,8 +1226,9 @@  static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 
 	edesc->pset[0].param.opt |= ITCCHEN;
 	if (nslots == 1) {
-		/* Enable transfer complete interrupt */
-		edesc->pset[0].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[0].param.opt |= TCINTEN;
 	} else {
 		/* Enable transfer complete chaining for the first slot */
 		edesc->pset[0].param.opt |= TCCHEN;
@@ -1254,7 +1255,9 @@  static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		}
 
 		edesc->pset[1].param.opt |= ITCCHEN;
-		edesc->pset[1].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[1].param.opt |= TCINTEN;
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -1816,6 +1819,20 @@  static enum dma_status edma_tx_status(struct dma_chan *chan,
 	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (ret != DMA_COMPLETE && echan->edesc && !echan->edesc->cyclic) {
+		struct edma_cc *ecc = echan->ecc;
+		int channel = EDMA_CHAN_SLOT(echan->ch_num);
+		int idx = EDMA_REG_ARRAY_INDEX(channel);
+		int ch_bit = EDMA_CHANNEL_BIT(channel);
+		unsigned int sh_er = edma_shadow0_read_array(ecc, SH_ER, idx);
+		unsigned int sh_eer = edma_shadow0_read_array(ecc, SH_EER, idx);
+
+		/* The channel is no longer active */
+		if (!(sh_er & ch_bit) && !(sh_eer & ch_bit))
+			ret = DMA_COMPLETE;
+	}
+
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;