[v2,7/7] dmaengine: cppi41: Fix a race between PM runtime and channel abort

Message ID 20170117134540.9988-8-abailon@baylibre.com (mailing list archive)
State New, archived

Commit Message

Alexandre Bailon Jan. 17, 2017, 1:45 p.m. UTC
cppi41_dma_issue_pending() may be called while the device is runtime
suspended. In that case, the descriptor is added to the pending list
and will be queued to the hardware queue once the device resumes.
But if cppi41_stop_chan() is called before the device has had time to
resume, the descriptor remains on the pending list and gets queued to
the hardware queue after the teardown.
During channel stop, check whether there is a pending descriptor and,
if so, remove it.

Signed-off-by: Alexandre Bailon <abailon@baylibre.com>
---
 drivers/dma/cppi41.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
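
For context, the race comes from the deferred-push pattern in the driver's
issue_pending path: when the device is runtime suspended, the channel is
parked on cdd->pending and only pushed to the hardware queue later, from
cppi41_runtime_resume(). The following is a simplified sketch of that path,
not the exact mainline code; the names (cppi41_dma_issue_pending(),
push_desc_queue(), cdd->pending, cdd->lock) match the driver, but details
such as error handling and runtime-PM reference balancing are elided.

/*
 * Illustrative sketch: an approximation of the issue_pending path, not
 * the exact driver code.  When the device is runtime suspended, the
 * channel is parked on cdd->pending and pushed to the hardware queue
 * later from cppi41_runtime_resume().  The race being fixed is the
 * window between this deferral and that resume callback.
 */
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	unsigned long flags;

	/* In the real driver this reference is balanced elsewhere (not shown). */
	pm_runtime_get(cdd->ddev.dev);

	if (likely(pm_runtime_active(cdd->ddev.dev))) {
		/* Device is up: hand the descriptor to the hardware now. */
		push_desc_queue(c);
	} else {
		/*
		 * Device not resumed yet: defer.  If cppi41_stop_chan()
		 * runs before cppi41_runtime_resume(), the channel stays
		 * on this list and would be pushed after the teardown,
		 * which is why this patch removes it in the stop path.
		 */
		spin_lock_irqsave(&cdd->lock, flags);
		list_add_tail(&c->node, &cdd->pending);
		spin_unlock_irqrestore(&cdd->lock, flags);
	}
}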

Patch

diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 303ccee..0bc4f1a 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -687,10 +687,17 @@  static int cppi41_stop_chan(struct dma_chan *chan)
 {
 	struct cppi41_channel *c = to_cpp41_chan(chan);
 	struct cppi41_dd *cdd = c->cdd;
+	unsigned long flags;
 	u32 desc_num;
 	u32 desc_phys;
 	int ret;
 
+	/* Remove a pending descriptor that hasn't been pushed to the queue yet */
+	spin_lock_irqsave(&cdd->lock, flags);
+	if (!list_empty(&c->node))
+		list_del_init(&c->node);
+	spin_unlock_irqrestore(&cdd->lock, flags);
+
 	desc_phys = lower_32_bits(c->desc_phys);
 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	if (!cdd->chan_busy[desc_num])
@@ -748,6 +755,7 @@  static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
 		cchan->desc_phys = cdd->descs_phys;
 		cchan->desc_phys += i * sizeof(struct cppi41_desc);
 		cchan->chan.device = &cdd->ddev;
+		INIT_LIST_HEAD(&cchan->node);
 		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
 	}
 	cdd->first_td_desc = n_chans;
@@ -1163,7 +1171,7 @@  static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 	spin_lock_irqsave(&cdd->lock, flags);
 	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
 		push_desc_queue(c);
-		list_del(&c->node);
+		list_del_init(&c->node);
 	}
 	spin_unlock_irqrestore(&cdd->lock, flags);
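
A side note on why the last hunk switches list_del() to list_del_init() and
why cppi41_add_chans() gains an INIT_LIST_HEAD(): the new check in
cppi41_stop_chan() calls list_empty(&c->node) on the node itself, which is
only meaningful if a node that is not on the pending list points back to
itself. The small user-space model below illustrates the point; the list
helpers are re-implemented here purely for illustration and are simplified
versions of the <linux/list.h> ones (the kernel's list_del() additionally
poisons the pointers, so list_empty() on a plainly deleted node is not a
valid test).

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* node now reports "not on any list" */
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head pending, node;

	INIT_LIST_HEAD(&pending);
	INIT_LIST_HEAD(&node);

	/* issue_pending side: channel parked on the pending list. */
	list_add_tail(&node, &pending);

	/* runtime_resume side: descriptor pushed, node removed with _init. */
	list_del_init(&node);

	/*
	 * stop_chan side: because list_del_init() re-initialised the node,
	 * list_empty(&node) is a reliable "is this channel still pending?"
	 * test.  A plain list_del() would leave stale pointers behind and
	 * the test would not work.
	 */
	assert(list_empty(&node));
	printf("channel still pending: %d\n", !list_empty(&node));
	return 0;
}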