mmc: mxcmmc: fix bug that may block a data transfer forever.

Message ID: 1346917248-6685-1-git-send-email-javier.martin@vista-silicon.com (mailing list archive)
State: Changes Requested, archived

Commit Message

Javier Martin Sept. 6, 2012, 7:40 a.m. UTC
The problem can easily be reproduced with a script that repeatedly
copies a file on an SD card to another location on the same card;
it is related to read transfers and only happens with DMA enabled.

When reading, an MMC irq signals that all data from the SD card
has been copied to the controller's internal buffers. However, it
does not indicate whether the DMA transfer that moves data from
these internal buffers to RAM has finished. Thus, calling
dmaengine_terminate_all() in the MMC irq routine can cancel an
ongoing DMA transfer, leaving stale data in the internal buffers;
the effect is cumulative and eventually blocks a read transfer
forever.
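
For reference, the pre-patch cleanup path looks roughly like this
(condensed from mxcmci_finish_data() before this patch; see the
removed lines in the diff below). It runs from the MMC irq path,
where a read DMA may still be in flight:

	/* Pre-patch behaviour, condensed.  For reads, the DMA that drains
	 * the controller's internal buffers into RAM may still be running
	 * here, so terminating it strands data in those buffers. */
	static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
	{
		struct mmc_data *data = host->data;
		...
		if (mxcmci_use_dma(host)) {
			dmaengine_terminate_all(host->dma);
			dma_unmap_sg(host->dma->device->dev, data->sg,
					data->sg_len, host->dma_dir);
		}
		...
	}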

This patch watches both the DMA and MMC irqs and makes sure both
have arrived before marking a transfer as finished. The dangerous
use of dmaengine_terminate_all() in the irq path is removed, and a
2-second watchdog timeout is added so that the MMC can no longer
block forever. A condensed sketch of the completion logic follows.
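
In outline, the completion logic is a two-flag rendezvous: each irq
path sets its flag under host->irqlock, and whichever irq arrives
second completes the transfer (condensed from the patch below):

	/* Runs in both mxcmci_dma_irq_callback() and mxcmci_irq();
	 * only the second arrival calls mxcmci_data_done().  The
	 * watchdog catches the case where one irq never arrives. */
	spin_lock_irqsave(&host->irqlock, flags);
	host->dmairq = true;		/* host->mmcirq = true in mxcmci_irq() */
	if (host->mmcirq && host->dmairq) {
		del_timer(&host->watchdog);
		host->mmcirq = false;
		host->dmairq = false;
		mxcmci_data_done(host, stat);
	}
	spin_unlock_irqrestore(&host->irqlock, flags);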

Signed-off-by: Javier Martin <javier.martin@vista-silicon.com>
---
 drivers/mmc/host/mxcmmc.c |   97 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 92 insertions(+), 5 deletions(-)

Comments

Sascha Hauer Sept. 6, 2012, 10:32 a.m. UTC | #1
On Thu, Sep 06, 2012 at 09:40:48AM +0200, Javier Martin wrote:
> The problem can easily be reproduced with a script that repeatedly
> copies a file on an SD card to another location on the same card;
> it is related to read transfers and only happens with DMA enabled.
> 
> When reading, an MMC irq signals that all data from the SD card
> has been copied to the controller's internal buffers. However, it
> does not indicate whether the DMA transfer that moves data from
> these internal buffers to RAM has finished. Thus, calling
> dmaengine_terminate_all() in the MMC irq routine can cancel an
> ongoing DMA transfer, leaving stale data in the internal buffers;
> the effect is cumulative and eventually blocks a read transfer
> forever.

Doesn't that mean that in case of a DMA read we just have to wait for
the DMA callback instead of the MMC irq? Something like:

static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
		unsigned int cmdat)
{
	...

	if (mxcmci_use_dma(host)) {
		if (host->dma_dir == DMA_FROM_DEVICE)
			setup_dma_callback();
		else
			int_cntr |= INT_WRITE_OP_DONE_EN;
	}

	...
}

That would be a cleaner solution I think.

Sascha
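
A fuller sketch of that suggestion might look as follows; the names
setup_dma_callback() and mxcmci_dma_read_done() are placeholders for
illustration, not existing driver functions. Reads would complete from
the dmaengine callback, writes from the WRITE_OP_DONE irq, so neither
path would need to cancel the other:

	/* Hypothetical sketch of the DMA-callback approach; names are
	 * placeholders.  Called by the dmaengine once the read data has
	 * actually landed in RAM. */
	static void mxcmci_dma_read_done(void *param)
	{
		struct mxcmci_host *host = param;
		u32 stat = readl(host->base + MMC_REG_STATUS);

		mxcmci_data_done(host, stat);
	}

	static void setup_dma_callback(struct mxcmci_host *host)
	{
		host->desc->callback = mxcmci_dma_read_done;
		host->desc->callback_param = host;
	}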

Patch

diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 28ed52d..4907d40 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -150,6 +150,11 @@  struct mxcmci_host {
 	int			dmareq;
 	struct dma_slave_config dma_slave_config;
 	struct imx_dma_data	dma_data;
+
+	bool			dmairq;
+	bool			mmcirq;
+	struct timer_list	watchdog;
+	spinlock_t		irqlock;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -213,6 +218,28 @@  static void mxcmci_softreset(struct mxcmci_host *host)
 	writew(0xff, host->base + MMC_REG_RES_TO);
 }
 static int mxcmci_setup_dma(struct mmc_host *mmc);
+static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
+
+static void mxcmci_dma_irq_callback(void *param)
+{
+	struct mxcmci_host *host = param;
+	u32 stat = readl(host->base + MMC_REG_STATUS);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->irqlock, flags);
+
+	host->dmairq = true;
+	if (host->mmcirq) {
+		del_timer(&host->watchdog);
+		host->mmcirq = false;
+		host->dmairq = false;
+		mxcmci_data_done(host, stat);
+	}
+
+	spin_unlock_irqrestore(&host->irqlock, flags);
+
+	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
+}
 
 static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 {
@@ -268,9 +295,13 @@  static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 	}
 	wmb();
 
+	host->desc->callback = mxcmci_dma_irq_callback;
+	host->desc->callback_param = host;
 	dmaengine_submit(host->desc);
 	dma_async_issue_pending(host->dma);
 
+	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(2000));
+
 	return 0;
 }
 
@@ -345,11 +376,9 @@  static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 	struct mmc_data *data = host->data;
 	int data_error;
 
-	if (mxcmci_use_dma(host)) {
-		dmaengine_terminate_all(host->dma);
+	if (mxcmci_use_dma(host))
 		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 				host->dma_dir);
-	}
 
 	if (stat & STATUS_ERR_MASK) {
 		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -624,8 +653,18 @@  static irqreturn_t mxcmci_irq(int irq, void *devid)
 		mxcmci_cmd_done(host, stat);
 
 	if (mxcmci_use_dma(host) &&
-		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
-		mxcmci_data_done(host, stat);
+		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) {
+		spin_lock_irqsave(&host->irqlock, flags);
+		host->mmcirq = true;
+		if (host->dmairq) {
+			del_timer(&host->watchdog);
+			host->mmcirq = false;
+			host->dmairq = false;
+			mxcmci_data_done(host, stat);
+		}
+		spin_unlock_irqrestore(&host->irqlock, flags);
+
+	}
 
 	if (host->default_irq_mask &&
 		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
@@ -844,6 +883,49 @@  static const struct mmc_host_ops mxcmci_ops = {
 	.init_card		= mxcmci_init_card,
 };
 
+static void mxcmci_watchdog(unsigned long data)
+{
+	struct mmc_host *mmc = (struct mmc_host *)data;
+	struct mxcmci_host *host = mmc_priv(mmc);
+	struct mmc_request *req = host->req;
+	unsigned int stat = readl(host->base + MMC_REG_STATUS);
+	unsigned long flags;
+
+	dev_err(mmc_dev(host->mmc), "%s: mmc status = 0x%08x\n", __func__, stat);
+
+	spin_lock_irqsave(&host->irqlock, flags);
+	/* Protect against spurious triggers */
+	if (host->dmairq && host->mmcirq) {
+		dev_err(mmc_dev(host->mmc), "%s: spurious timeout\n", __func__);
+		host->mmcirq = false;
+		host->dmairq = false;
+		spin_unlock_irqrestore(&host->irqlock, flags);
+		return;
+	}
+
+	if (!host->dmairq) {
+		dev_err(mmc_dev(host->mmc), "%s: irq from DMA didn't arrive\n", __func__);
+		dmaengine_terminate_all(host->dma);
+	}
+	if (!host->mmcirq) {
+		dev_err(mmc_dev(host->mmc), "%s: irq from MMC didn't arrive\n", __func__);
+		mxcmci_softreset(host);
+	}
+
+	host->mmcirq = false;
+	host->dmairq = false;
+	spin_unlock_irqrestore(&host->irqlock, flags);
+
+	/* Mark transfer as erroneous and inform the upper layers */
+	host->data->error = -ETIMEDOUT;
+
+	host->req = NULL;
+	host->cmd = NULL;
+	host->data = NULL;
+
+	mmc_request_done(host->mmc, req);
+}
+
 static int mxcmci_probe(struct platform_device *pdev)
 {
 	struct mmc_host *mmc;
@@ -889,6 +970,7 @@  static int mxcmci_probe(struct platform_device *pdev)
 	host->mmc = mmc;
 	host->pdata = pdev->dev.platform_data;
 	spin_lock_init(&host->lock);
+	spin_lock_init(&host->irqlock);
 
 	mxcmci_init_ocr(host);
 
@@ -968,6 +1050,11 @@  static int mxcmci_probe(struct platform_device *pdev)
 
+	/* DMA watchdog: set up before mmc_add_host() can start requests */
+	init_timer(&host->watchdog);
+	host->watchdog.function = &mxcmci_watchdog;
+	host->watchdog.data = (unsigned long)mmc;
+
 	mmc_add_host(mmc);
 
 	return 0;
 
 out_free_irq: