diff mbox

[2/2] mmc: tmio: use PIO for short transfers

Message ID Pine.LNX.4.64.1103071429420.29543@axis700.grange (mailing list archive)
State New, archived
Headers show

Commit Message

Guennadi Liakhovetski March 7, 2011, 1:30 p.m. UTC
(no commit message provided)
diff mbox

Patch

diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index aa384ba..f5ec5e6 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -100,6 +100,8 @@ 
 		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
 #define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
 
+#define TMIO_MIN_DMA_LEN 8
+
 #define enable_mmc_irqs(host, i) \
 	do { \
 		u32 mask;\
@@ -147,6 +149,7 @@  struct tmio_mmc_host {
 	struct platform_device *pdev;
 
 	/* DMA support */
+	bool			force_pio;
 	struct dma_chan		*chan_rx;
 	struct dma_chan		*chan_tx;
 	struct tasklet_struct	dma_complete;
@@ -384,6 +387,7 @@  static void tmio_mmc_reset_work(struct work_struct *work)
 	host->cmd = NULL;
 	host->data = NULL;
 	host->mrq = NULL;
+	host->force_pio = false;
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
@@ -403,6 +407,7 @@  tmio_mmc_finish_request(struct tmio_mmc_host *host)
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
+	host->force_pio = false;
 
 	cancel_delayed_work(&host->delayed_reset_work);
 
@@ -484,7 +489,7 @@  static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	unsigned int count;
 	unsigned long flags;
 
-	if (host->chan_tx || host->chan_rx) {
+	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
 		pr_err("PIO IRQ in DMA mode!\n");
 		return;
 	} else if (!data) {
@@ -550,15 +555,11 @@  static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 	 */
 
 	if (data->flags & MMC_DATA_READ) {
-		if (!host->chan_rx)
-			disable_mmc_irqs(host, TMIO_MASK_READOP);
-		else
+		if (host->chan_rx && !host->force_pio)
 			tmio_check_bounce_buffer(host);
 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
 			host->mrq);
 	} else {
-		if (!host->chan_tx)
-			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
 			host->mrq);
 	}
@@ -582,7 +583,7 @@  static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	if (!data)
 		goto out;
 
-	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
+	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
 		/*
 		 * Has all data been written out yet? Testing on SuperH showed,
 		 * that in most cases the first interrupt comes already with the
@@ -595,11 +596,12 @@  static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
 			tasklet_schedule(&host->dma_complete);
 		}
-	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
+	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
 		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
 		tasklet_schedule(&host->dma_complete);
 	} else {
 		tmio_mmc_do_data_irq(host);
+		disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
 	}
 out:
 	spin_unlock(&host->lock);
@@ -648,12 +650,12 @@  static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	 */
 	if (host->data && !cmd->error) {
 		if (host->data->flags & MMC_DATA_READ) {
-			if (!host->chan_rx)
+			if (host->force_pio || !host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
 			else
 				tasklet_schedule(&host->dma_issue);
 		} else {
-			if (!host->chan_tx)
+			if (host->force_pio || !host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 			else
 				tasklet_schedule(&host->dma_issue);
@@ -811,6 +813,11 @@  static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	if (sg->length < TMIO_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
 	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
 
 	/* The only sg element can be unaligned, use our bounce buffer then */
@@ -886,6 +893,11 @@  static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	if (sg->length < TMIO_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
 	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
 
 	/* The only sg element can be unaligned, use our bounce buffer then */
@@ -1132,6 +1144,7 @@  static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 fail:
 	host->mrq = NULL;
+	host->force_pio = false;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }