From patchwork Mon Mar 7 13:30:31 2011
X-Patchwork-Submitter: Guennadi Liakhovetski
X-Patchwork-Id: 615631
Date: Mon, 7 Mar 2011 14:30:31 +0100 (CET)
From: Guennadi Liakhovetski
To: linux-sh@vger.kernel.org
Cc: linux-mmc@vger.kernel.org, Ian Molton, Chris Ball
Subject: [PATCH 1/2] mmc: tmio: Improve DMA stability on sh-mobile
X-Mailing-List: linux-mmc@vger.kernel.org

diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e3c6ef2..aa384ba 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -484,7 +484,10 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	unsigned int count;
 	unsigned long flags;
 
-	if (!data) {
+	if (host->chan_tx || host->chan_rx) {
+		pr_err("PIO IRQ in DMA mode!\n");
+		return;
+	} else if (!data) {
 		pr_debug("Spurious PIO IRQ\n");
 		return;
 	}
@@ -647,6 +650,8 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 		if (host->data->flags & MMC_DATA_READ) {
 			if (!host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
+			else
+				tasklet_schedule(&host->dma_issue);
 		} else {
 			if (!host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
@@ -779,18 +784,6 @@ static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 #endif
 }
 
-static void tmio_dma_complete(void *arg)
-{
-	struct tmio_mmc_host *host = arg;
-
-	dev_dbg(&host->pdev->dev, "Command completed\n");
-
-	if (!host->data)
-		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
-	else
-		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-}
-
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
@@ -818,6 +811,8 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
@@ -829,18 +824,14 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	if (ret > 0) {
 		host->dma_sglen = ret;
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_FROM_DEVICE, DMA_CTRL_ACK);
 	}
 
 	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
 		cookie = desc->tx_submit(desc);
 		if (cookie < 0) {
 			desc = NULL;
 			ret = cookie;
-		} else {
-			chan->device->device_issue_pending(chan);
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
@@ -895,6 +886,8 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		unsigned long flags;
@@ -910,12 +903,10 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	if (ret > 0) {
 		host->dma_sglen = ret;
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_TO_DEVICE, DMA_CTRL_ACK);
 	}
 
 	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
 		cookie = desc->tx_submit(desc);
 		if (cookie < 0) {
 			desc = NULL;
@@ -962,17 +953,30 @@ static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 static void tmio_issue_tasklet_fn(unsigned long priv)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
-	struct dma_chan *chan = host->chan_tx;
+	struct dma_chan *chan = NULL;
+
+	spin_lock_irq(&host->lock);
+
+	if (host && host->data) {
+		if (host->data->flags & MMC_DATA_READ)
+			chan = host->chan_rx;
+		else
+			chan = host->chan_tx;
+	}
+
+	spin_unlock_irq(&host->lock);
+
+	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 
-	chan->device->device_issue_pending(chan);
+	if (chan)
+		chan->device->device_issue_pending(chan);
 }
 
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-	unsigned long flags;
 
-	spin_lock_irqsave(&host->lock, flags);
+	spin_lock_irq(&host->lock);
 
 	if (!host->data)
 		goto out;
@@ -986,7 +990,7 @@ static void tmio_tasklet_fn(unsigned long arg)
 		tmio_mmc_do_data_irq(host);
 
 out:
-	spin_unlock_irqrestore(&host->lock, flags);
+	spin_unlock_irq(&host->lock);
 }
 
 /* It might be necessary to make filter MFD specific */
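
Not part of the patch itself: the sketch below condenses the descriptor-submission
order the change establishes for a DMA read, using the slave-DMA calls visible in
the diff (device_prep_slave_sg, tx_submit, device_issue_pending). The function
name start_dma_read_sketch and its parameter list are illustrative only; error
paths and the bounce-buffer handling are omitted.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative sketch, not driver code: prepare and submit the receive
 * descriptor without DMA_PREP_INTERRUPT and without a completion callback --
 * the controller's DATAEND interrupt now reports completion instead. */
static void start_dma_read_sketch(struct tmio_mmc_host *host,
				  struct dma_chan *chan,
				  struct scatterlist *sg, int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	/* keep PIO receive interrupts masked while DMA owns the data phase */
	disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	desc = chan->device->device_prep_slave_sg(chan, sg, sg_len,
			DMA_FROM_DEVICE, DMA_CTRL_ACK);
	if (desc)
		desc->tx_submit(desc);	/* queued, but not issued yet */
}

/* The command-completion IRQ then runs tasklet_schedule(&host->dma_issue),
 * and only the dma_issue tasklet unmasks DATAEND and kicks the engine:
 *
 *	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 *	chan->device->device_issue_pending(chan);
 */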