From patchwork Tue Sep 5 08:16:38 2017
X-Patchwork-Submitter: Lars-Peter Clausen
X-Patchwork-Id: 9938175
From: Lars-Peter Clausen
To: Vinod Koul
Cc: dmaengine@vger.kernel.org, Lars-Peter Clausen
Subject: [PATCH 2/2] dma: axi-dmac: Fix software cyclic mode
Date: Tue, 5 Sep 2017 10:16:38 +0200
Message-Id: <20170905081638.5117-2-lars@metafoo.de>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20170905081638.5117-1-lars@metafoo.de>
References: <20170905081638.5117-1-lars@metafoo.de>

When running in software cyclic mode the driver currently does not go back
to the first segment once the last segment has been reached, which
effectively makes the transfer non-cyclic. Fix this by wrapping around to
the first segment once the last segment has been reached for cyclic
transfers.

Special care needs to be taken to avoid a segment being submitted multiple
times concurrently, which could happen for transfers with fewer segments
than the DMA controller's internal queue can hold.
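To illustrate the hazard, here is a standalone, deliberately simplified
model of the scheme (not driver code; the names, the two-segment transfer
and the deeper hardware queue are made up for the example). With fewer
segments than queue slots, the scheduler wraps around and would queue
segment 0 a second time while it is still owned by the hardware; the
sentinel ID plus the schedule_when_free flag defer the resubmission to the
segment's completion:

	#include <stdbool.h>
	#include <stdio.h>

	#define SG_UNUSED 32u	/* sentinel: segment not currently in hardware */
	#define NUM_SGS   2u

	struct seg {
		unsigned int id;	 /* hardware transfer ID while queued */
		bool schedule_when_free; /* resubmit once this segment completes */
	};

	static struct seg sg[NUM_SGS] = {
		{ .id = SG_UNUSED }, { .id = SG_UNUSED },
	};
	static unsigned int num_submitted;
	static unsigned int next_id;

	static void start_transfer(void)
	{
		struct seg *s = &sg[num_submitted];

		if (s->id != SG_UNUSED) {
			/* Still queued in hardware: defer until completion */
			s->schedule_when_free = true;
			printf("segment %u busy, deferring resubmit\n",
			       num_submitted);
			return;
		}
		s->id = next_id++ % 32;
		printf("submitted segment %u as hw id %u\n", num_submitted, s->id);
		num_submitted = (num_submitted + 1) % NUM_SGS; /* cyclic wrap */
	}

	static void transfer_done(unsigned int idx)
	{
		sg[idx].id = SG_UNUSED;
		if (sg[idx].schedule_when_free) {
			sg[idx].schedule_when_free = false;
			start_transfer(); /* now safe to queue it again */
		}
	}

	int main(void)
	{
		start_transfer(); /* segment 0 -> hardware */
		start_transfer(); /* segment 1 -> hardware */
		start_transfer(); /* segment 0 still busy -> deferred */
		transfer_done(0); /* completion resubmits segment 0 */
		return 0;
	}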
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
---
 drivers/dma/dma-axi-dmac.c | 69 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 51 insertions(+), 18 deletions(-)

diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index eb289aa187dd..2419fe524daa 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -72,6 +72,9 @@
 
 #define AXI_DMAC_FLAG_CYCLIC	BIT(0)
 
+/* The maximum ID allocated by the hardware is 31 */
+#define AXI_DMAC_SG_UNUSED 32U
+
 struct axi_dmac_sg {
 	dma_addr_t src_addr;
 	dma_addr_t dest_addr;
@@ -80,6 +83,7 @@ struct axi_dmac_sg {
 	unsigned int dest_stride;
 	unsigned int src_stride;
 	unsigned int id;
+	bool schedule_when_free;
 };
 
 struct axi_dmac_desc {
@@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 	}
 	sg = &desc->sg[desc->num_submitted];
 
+	/* Already queued in cyclic mode. Wait for it to finish */
+	if (sg->id != AXI_DMAC_SG_UNUSED) {
+		sg->schedule_when_free = true;
+		return;
+	}
+
 	desc->num_submitted++;
-	if (desc->num_submitted == desc->num_sgs)
-		chan->next_desc = NULL;
-	else
+	if (desc->num_submitted == desc->num_sgs) {
+		if (desc->cyclic)
+			desc->num_submitted = 0; /* Start again */
+		else
+			chan->next_desc = NULL;
+	} else {
 		chan->next_desc = desc;
+	}
 
 	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
 
@@ -239,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
 		struct axi_dmac_desc, vdesc.node);
 }
 
-static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	unsigned int completed_transfers)
 {
 	struct axi_dmac_desc *active;
 	struct axi_dmac_sg *sg;
+	bool start_next = false;
 
 	active = axi_dmac_active_desc(chan);
 	if (!active)
-		return;
+		return false;
 
-	if (active->cyclic) {
-		vchan_cyclic_callback(&active->vdesc);
-	} else {
-		do {
-			sg = &active->sg[active->num_completed];
-			if (!(BIT(sg->id) & completed_transfers))
-				break;
-			active->num_completed++;
-			if (active->num_completed == active->num_sgs) {
+	do {
+		sg = &active->sg[active->num_completed];
+		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+			break;
+		if (!(BIT(sg->id) & completed_transfers))
+			break;
+		active->num_completed++;
+		sg->id = AXI_DMAC_SG_UNUSED;
+		if (sg->schedule_when_free) {
+			sg->schedule_when_free = false;
+			start_next = true;
+		}
+
+		if (active->cyclic)
+			vchan_cyclic_callback(&active->vdesc);
+
+		if (active->num_completed == active->num_sgs) {
+			if (active->cyclic) {
+				active->num_completed = 0; /* wrap around */
+			} else {
 				list_del(&active->vdesc.node);
 				vchan_cookie_complete(&active->vdesc);
 				active = axi_dmac_active_desc(chan);
 			}
-		} while (active);
-	}
+		}
+	} while (active);
+
+	return start_next;
 }
 
 static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
 {
 	struct axi_dmac *dmac = devid;
 	unsigned int pending;
+	bool start_next = false;
 
 	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
 	if (!pending)
@@ -283,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
 		unsigned int completed;
 
 		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
-		axi_dmac_transfer_done(&dmac->chan, completed);
+		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
 	}
 	/* Space has become available in the descriptor queue */
-	if (pending & AXI_DMAC_IRQ_SOT)
+	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
 		axi_dmac_start_transfer(&dmac->chan);
 	spin_unlock(&dmac->chan.vchan.lock);
 
@@ -336,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
 static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 {
 	struct axi_dmac_desc *desc;
+	unsigned int i;
 
 	desc = kzalloc(sizeof(struct axi_dmac_desc) +
 		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
 	if (!desc)
 		return NULL;
 
+	for (i = 0; i < num_sgs; i++)
+		desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+
 	desc->num_sgs = num_sgs;
 
 	return desc;
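
For reference, the path fixed here is exercised by any dmaengine client that
sets up a cyclic transfer on this controller, roughly as below. This is
generic dmaengine API usage for illustration, not part of this patch; the
channel name "rx", the helper name and the period/buffer parameters are
hypothetical:

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static int setup_cyclic_rx(struct device *dev, dma_addr_t buf_phys,
				   size_t period_len, unsigned int periods,
				   dma_async_tx_callback period_elapsed,
				   void *ctx)
	{
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;

		chan = dma_request_chan(dev, "rx"); /* hypothetical name */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/*
		 * One segment per period; fewer periods than the controller's
		 * internal queue depth is exactly the case fixed above.
		 */
		desc = dmaengine_prep_dma_cyclic(chan, buf_phys,
						 periods * period_len,
						 period_len, DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!desc) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		desc->callback = period_elapsed; /* via vchan_cyclic_callback() */
		desc->callback_param = ctx;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}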