From patchwork Wed Oct 22 15:43:40 2014
X-Patchwork-Submitter: Maxime Ripard
X-Patchwork-Id: 5134571
From: Maxime Ripard
To: dmaengine@vger.kernel.org, Vinod Koul
Cc: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
	Laurent Pinchart, Antoine Ténart, lars@metafoo.de, Russell King,
	Maxime Ripard, Dan Williams
Subject: [PATCH v3 26/59] dmaengine: k3: Split device_control
Date: Wed, 22 Oct 2014 17:43:40 +0200
Message-Id: <1413992653-21963-27-git-send-email-maxime.ripard@free-electrons.com>
X-Mailer: git-send-email 2.1.1
In-Reply-To: <1413992653-21963-1-git-send-email-maxime.ripard@free-electrons.com>
References: <1413992653-21963-1-git-send-email-maxime.ripard@free-electrons.com>

Split the device_control callback of the Hisilicon K3 DMA driver into the
newly introduced per-operation callbacks (device_config, device_pause,
device_resume and device_terminate_all), which will eventually be used to
retrieve slave capabilities.
Signed-off-by: Maxime Ripard
---
 drivers/dma/k3dma.c | 197 ++++++++++++++++++++++++++++------------------------
 1 file changed, 107 insertions(+), 90 deletions(-)

diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1f911aaf220..fb4dd2cffd80 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 	num = 0;

 	if (!c->ccfg) {
-		/* default is memtomem, without calling device_control */
+		/* default is memtomem, without calling device_config */
 		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
 		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
 		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }

-static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
+static int k3_dma_config(struct dma_chan *chan,
+	struct dma_slave_config *cfg)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	u32 maxburst = 0, val = 0;
+	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+	if (cfg == NULL)
+		return -EINVAL;
+	c->dir = cfg->direction;
+	if (c->dir == DMA_DEV_TO_MEM) {
+		c->ccfg = CX_CFG_DSTINCR;
+		c->dev_addr = cfg->src_addr;
+		maxburst = cfg->src_maxburst;
+		width = cfg->src_addr_width;
+	} else if (c->dir == DMA_MEM_TO_DEV) {
+		c->ccfg = CX_CFG_SRCINCR;
+		c->dev_addr = cfg->dst_addr;
+		maxburst = cfg->dst_maxburst;
+		width = cfg->dst_addr_width;
+	}
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		val = __ffs(width);
+		break;
+	default:
+		val = 3;
+		break;
+	}
+	c->ccfg |= (val << 12) | (val << 16);
+
+	if ((maxburst == 0) || (maxburst > 16))
+		val = 16;
+	else
+		val = maxburst - 1;
+	c->ccfg |= (val << 20) | (val << 24);
+	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+	/* specific request line */
+	c->ccfg |= c->vc.chan.chan_id << 4;
+
+	return 0;
+}
+
+static int k3_dma_terminate_all(struct dma_chan *chan)
 {
 	struct k3_dma_chan *c = to_k3_chan(chan);
 	struct k3_dma_dev *d = to_k3_dma(chan->device);
-	struct dma_slave_config *cfg = (void *)arg;
 	struct k3_dma_phy *p = c->phy;
 	unsigned long flags;
-	u32 maxburst = 0, val = 0;
-	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 	LIST_HEAD(head);

-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
-		if (cfg == NULL)
-			return -EINVAL;
-		c->dir = cfg->direction;
-		if (c->dir == DMA_DEV_TO_MEM) {
-			c->ccfg = CX_CFG_DSTINCR;
-			c->dev_addr = cfg->src_addr;
-			maxburst = cfg->src_maxburst;
-			width = cfg->src_addr_width;
-		} else if (c->dir == DMA_MEM_TO_DEV) {
-			c->ccfg = CX_CFG_SRCINCR;
-			c->dev_addr = cfg->dst_addr;
-			maxburst = cfg->dst_maxburst;
-			width = cfg->dst_addr_width;
-		}
-		switch (width) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		case DMA_SLAVE_BUSWIDTH_8_BYTES:
-			val = __ffs(width);
-			break;
-		default:
-			val = 3;
-			break;
-		}
-		c->ccfg |= (val << 12) | (val << 16);
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

-		if ((maxburst == 0) || (maxburst > 16))
-			val = 16;
-		else
-			val = maxburst - 1;
-		c->ccfg |= (val << 20) | (val << 24);
-		c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);

-		/* specific request line */
-		c->ccfg |= c->vc.chan.chan_id << 4;
-		break;
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	if (p) {
+		/* vchan is assigned to a pchan - stop the channel */
+		k3_dma_terminate_chan(p, d);
+		c->phy = NULL;
+		p->vchan = NULL;
+		p->ds_run = p->ds_done = NULL;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);

-	case DMA_TERMINATE_ALL:
-		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+	return 0;
+}

-		/* Prevent this channel being scheduled */
-		spin_lock(&d->lock);
-		list_del_init(&c->node);
-		spin_unlock(&d->lock);
+static int k3_dma_pause(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;

-		/* Clear the tx descriptor lists */
-		spin_lock_irqsave(&c->vc.lock, flags);
-		vchan_get_all_descriptors(&c->vc, &head);
+	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+	if (c->status == DMA_IN_PROGRESS) {
+		c->status = DMA_PAUSED;
 		if (p) {
-			/* vchan is assigned to a pchan - stop the channel */
-			k3_dma_terminate_chan(p, d);
-			c->phy = NULL;
-			p->vchan = NULL;
-			p->ds_run = p->ds_done = NULL;
+			k3_dma_pause_dma(p, false);
+		} else {
+			spin_lock(&d->lock);
+			list_del_init(&c->node);
+			spin_unlock(&d->lock);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		vchan_dma_desc_free_list(&c->vc, &head);
-		break;
+	}

-	case DMA_PAUSE:
-		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
-		if (c->status == DMA_IN_PROGRESS) {
-			c->status = DMA_PAUSED;
-			if (p) {
-				k3_dma_pause_dma(p, false);
-			} else {
-				spin_lock(&d->lock);
-				list_del_init(&c->node);
-				spin_unlock(&d->lock);
-			}
-		}
-		break;
+	return 0;
+}

-	case DMA_RESUME:
-		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
-		spin_lock_irqsave(&c->vc.lock, flags);
-		if (c->status == DMA_PAUSED) {
-			c->status = DMA_IN_PROGRESS;
-			if (p) {
-				k3_dma_pause_dma(p, true);
-			} else if (!list_empty(&c->vc.desc_issued)) {
-				spin_lock(&d->lock);
-				list_add_tail(&c->node, &d->chan_pending);
-				spin_unlock(&d->lock);
-			}
+static int k3_dma_resume(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
+	unsigned long flags;
+
+	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->status == DMA_PAUSED) {
+		c->status = DMA_IN_PROGRESS;
+		if (p) {
+			k3_dma_pause_dma(p, true);
+		} else if (!list_empty(&c->vc.desc_issued)) {
+			spin_lock(&d->lock);
+			list_add_tail(&c->node, &d->chan_pending);
+			spin_unlock(&d->lock);
 		}
-		spin_unlock_irqrestore(&c->vc.lock, flags);
-		break;
-	default:
-		return -ENXIO;
 	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
 	return 0;
 }
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
 	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
 	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
 	d->slave.device_issue_pending = k3_dma_issue_pending;
-	d->slave.device_control = k3_dma_control;
+	d->slave.device_config = k3_dma_config;
+	d->slave.device_pause = k3_dma_pause;
+	d->slave.device_resume = k3_dma_resume;
+	d->slave.device_terminate_all = k3_dma_terminate_all;
 	d->slave.copy_align = DMA_ALIGN;
 	d->slave.chancnt = d->dma_requests;
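
[Editor's note] For readers following the conversion: DMA consumers never call
these callbacks directly. They keep using the generic dmaengine wrappers, which
the core routes to the new per-operation hooks once the whole series is applied.
Below is a minimal sketch of the consumer side; example_setup(), example_stop()
and the fifo_addr parameter are purely illustrative and not part of this patch.

#include <linux/dmaengine.h>

/* Illustrative only: configure a hypothetical 8-bit peripheral FIFO on a
 * channel of this controller, then exercise the standard wrappers that now
 * end up in k3_dma_config(), k3_dma_pause(), k3_dma_resume() and
 * k3_dma_terminate_all() respectively. */
static int example_setup(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst	= 16,
	};

	return dmaengine_slave_config(chan, &cfg);	/* -> device_config */
}

static void example_stop(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* -> device_pause */
	dmaengine_resume(chan);		/* -> device_resume */
	dmaengine_terminate_all(chan);	/* -> device_terminate_all */
}

The same split is what later allows the dmaengine core to answer
dma_get_slave_caps() queries generically, which is the "retrieve slave
capabilities" part the changelog refers to.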