
[1/2] dma: pl330: Differentiate between submitted and issued descriptors

Message ID 1389467319-11957-1-git-send-email-lars@metafoo.de (mailing list archive)
State Accepted
Commit 04abf5daf7df
Delegated to: Vinod Koul
Headers show

Commit Message

Lars-Peter Clausen Jan. 11, 2014, 7:08 p.m. UTC
The pl330 dmaengine driver currently does not differentiate between submitted
and issued descriptors. It won't start transferring a newly submitted
descriptor until issue_pending() is called, but only if the channel is idle.
If the channel is active and a new descriptor is submitted before it goes
idle, the driver will happily start the newly submitted descriptor once all
earlier submitted descriptors have completed. This is not 100% correct with
regard to the dmaengine interface semantics: a descriptor is not supposed to
be started until the next issue_pending() call after it has been submitted.
This patch adds a second per-channel list that keeps track of the submitted
descriptors. Once issue_pending() is called the submitted descriptors are
moved to the work list, and only descriptors on the work list are started.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
---
 drivers/dma/pl330.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
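For reference, the client-side contract this change enforces looks roughly
like the following. This is a minimal sketch of a dmaengine slave client,
assuming a channel has already been requested and the buffer is already
DMA-mapped; the function and variable names are illustrative, not part of
the pl330 driver.

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustrative helper: queue one memory-to-device transfer and kick it off. */
static int start_one_transfer(struct dma_chan *chan, dma_addr_t buf,
			      size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Prepare a descriptor; nothing is handed to the hardware yet. */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	/* Submit: with this patch the descriptor sits on the submitted list. */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/*
	 * Only this call may start the transfer; descriptors submitted after
	 * it must wait for the next issue_pending().
	 */
	dma_async_issue_pending(chan);

	return 0;
}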

Comments

Vinod Koul Jan. 20, 2014, 8:21 a.m. UTC | #1
On Sat, Jan 11, 2014 at 08:08:38PM +0100, Lars-Peter Clausen wrote:
> The pl330 dmaengine driver currently does not differentiate between submitted
> and issued descriptors. It won't start transferring a newly submitted
> descriptor until issue_pending() is called, but only if the channel is idle.
> If the channel is active and a new descriptor is submitted before it goes
> idle, the driver will happily start the newly submitted descriptor once all
> earlier submitted descriptors have completed. This is not 100% correct with
> regard to the dmaengine interface semantics: a descriptor is not supposed to
> be started until the next issue_pending() call after it has been submitted.
> This patch adds a second per-channel list that keeps track of the submitted
> descriptors. Once issue_pending() is called the submitted descriptors are
> moved to the work list, and only descriptors on the work list are started.

Applied both, Thanks

~Vinod

Patch

diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7adaf3a..f9e5029 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -543,7 +543,9 @@  struct dma_pl330_chan {
 	/* DMA-Engine Channel */
 	struct dma_chan chan;
 
-	/* List of to be xfered descriptors */
+	/* List of submitted descriptors */
+	struct list_head submitted_list;
+	/* List of issued descriptors */
 	struct list_head work_list;
 	/* List of completed descriptors */
 	struct list_head completed_list;
@@ -2388,6 +2390,11 @@  static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
 		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
 		/* Mark all desc done */
+		list_for_each_entry(desc, &pch->submitted_list, node) {
+			desc->status = FREE;
+			dma_cookie_complete(&desc->txd);
+		}
+
 		list_for_each_entry(desc, &pch->work_list , node) {
 			desc->status = FREE;
 			dma_cookie_complete(&desc->txd);
@@ -2398,6 +2405,7 @@  static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
 			dma_cookie_complete(&desc->txd);
 		}
 
+		list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
 		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
 		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
 		spin_unlock_irqrestore(&pch->lock, flags);
@@ -2456,7 +2464,14 @@  pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 
 static void pl330_issue_pending(struct dma_chan *chan)
 {
-	pl330_tasklet((unsigned long) to_pchan(chan));
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pch->lock, flags);
+	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+	pl330_tasklet((unsigned long)pch);
 }
 
 /*
@@ -2483,11 +2498,11 @@  static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 		dma_cookie_assign(&desc->txd);
 
-		list_move_tail(&desc->node, &pch->work_list);
+		list_move_tail(&desc->node, &pch->submitted_list);
 	}
 
 	cookie = dma_cookie_assign(&last->txd);
-	list_add_tail(&last->node, &pch->work_list);
+	list_add_tail(&last->node, &pch->submitted_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
 
 	return cookie;
@@ -2979,6 +2994,7 @@  pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		else
 			pch->chan.private = adev->dev.of_node;
 
+		INIT_LIST_HEAD(&pch->submitted_list);
 		INIT_LIST_HEAD(&pch->work_list);
 		INIT_LIST_HEAD(&pch->completed_list);
 		spin_lock_init(&pch->lock);