| Message ID | 1416918346-2442-2-git-send-email-r.baldyga@samsung.com (mailing list archive) |
|---|---|
| State | Rejected |
On 11/25/2014 01:25 PM, Robert Baldyga wrote:
[...]
>  static enum dma_status
>  pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
>  		 struct dma_tx_state *txstate)
>  {
> -	return dma_cookie_status(chan, cookie, txstate);
> +	enum dma_status ret;
> +	unsigned long flags;
> +	struct dma_pl330_desc *desc;
> +	struct dma_pl330_chan *pch = to_pchan(chan);
> +	unsigned int bytes_transferred;
> +	unsigned int residual;
> +
> +	/* Check in pending list */
> +	spin_lock_irqsave(&pch->lock, flags);
> +	list_for_each_entry(desc, &pch->work_list, node) {
> +		if (desc->txd.cookie == cookie) {
> +			bytes_transferred =
> +				pl330_get_current_xferred_count(pch, desc);
> +			residual = desc->bytes_requested -
> +				bytes_transferred % desc->bytes_requested;
> +			dma_set_residue(txstate, residual);
> +			ret = desc->status;
> +			spin_unlock_irqrestore(&pch->lock, flags);
> +			return ret;

I don't think this has the correct semantics. The expected behavior is that
you pass the cookie of a descriptor and tx_status tells you how many bytes of
the whole descriptor are still left to be transferred. What you implemented
tells you how many bytes are still left for the current segment of the active
descriptor.

This will only work for descriptors that have a single segment, and it will
definitely break audio playback using the PL330, where a descriptor has more
than one segment.

> +		}
> +	}
> +	spin_unlock_irqrestore(&pch->lock, flags);
> +
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +	dma_set_residue(txstate, pch->transfered);
> +

pch->transfered is always 0?
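For reference, a minimal sketch of the whole-descriptor accounting described
above, i.e. the residue summed over every segment of the transaction
identified by the cookie. Field and helper names follow the posted patch; the
is_running() check is hypothetical, and skipping segments that belong to
earlier, already-queued transactions is left out. This is not code from this
series:

/*
 * Sketch only: accumulate the residue over all segments of the transaction
 * that ends with 'cookie'. The caller is assumed to hold pch->lock. The
 * is_running() helper is hypothetical; DONE is assumed to be a terminal
 * value of enum desc_status.
 */
static u32 pl330_residue_for_cookie(struct dma_pl330_chan *pch,
				    dma_cookie_t cookie)
{
	struct dma_pl330_desc *desc;
	u32 residual = 0, done;

	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE)
			done = desc->bytes_requested;	/* segment finished */
		else if (is_running(desc))		/* hypothetical check */
			done = pl330_get_current_xferred_count(pch, desc);
		else
			done = 0;			/* not started yet */

		residual += desc->bytes_requested - done;

		/* The last segment of a chain carries the returned cookie. */
		if (desc->txd.cookie == cookie)
			break;
	}

	return residual;
}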
On 25.11.2014 13:25, Robert Baldyga wrote:
> This patch adds the possibility to read the residue of a DMA transfer. It's
> useful when we want to know how many bytes have been transferred before we
> terminate the channel. That can happen, for example, on a timeout interrupt.
>
> Signed-off-by: Lukasz Czerwinski <l.czerwinski@samsung.com>
> Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
> ---
>  drivers/dma/pl330.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 60 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
> index d5149aa..c32806d 100644
> --- a/drivers/dma/pl330.c
> +++ b/drivers/dma/pl330.c
> @@ -437,6 +437,7 @@ struct dma_pl330_chan {
>  	/* For D-to-M and M-to-D channels */
>  	int burst_sz; /* the peripheral fifo width */
>  	int burst_len; /* the number of burst */
> +	int transfered;
>  	dma_addr_t fifo_addr;
>
>  	/* for cyclic capability */
> @@ -500,6 +501,9 @@ struct dma_pl330_desc {
>
>  	enum desc_status status;
>
> +	int bytes_requested;
> +	int direction;
> +
>  	/* The channel which currently holds this desc */
>  	struct dma_pl330_chan *pchan;
>
> @@ -2156,11 +2160,60 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
>  	spin_unlock_irqrestore(&pch->lock, flags);
>  }
>
> +int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
> +		struct dma_pl330_desc *desc)
> +{
> +	u32 val, addr;
> +	struct pl330_thread *thrd = pch->thread;
> +	void __iomem *regs = thrd->dmac->base;
> +
> +	val = addr = 0;
> +	switch (desc->direction) {
> +	case DMA_MEM_TO_DEV:
> +		val = readl(regs + SA(thrd->id));
> +		addr = desc->px.src_addr;
> +		break;
> +	case DMA_DEV_TO_MEM:
> +		val = readl(regs + DA(thrd->id));
> +		addr = desc->px.dst_addr;
> +		break;
> +	default:
> +		break;
> +	}
> +	return val - addr;
> +}
> +
>  static enum dma_status
>  pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
>  		 struct dma_tx_state *txstate)
>  {
> -	return dma_cookie_status(chan, cookie, txstate);
> +	enum dma_status ret;
> +	unsigned long flags;
> +	struct dma_pl330_desc *desc;
> +	struct dma_pl330_chan *pch = to_pchan(chan);
> +	unsigned int bytes_transferred;
> +	unsigned int residual;
> +
> +	/* Check in pending list */
> +	spin_lock_irqsave(&pch->lock, flags);
> +	list_for_each_entry(desc, &pch->work_list, node) {
> +		if (desc->txd.cookie == cookie) {
> +			bytes_transferred =
> +				pl330_get_current_xferred_count(pch, desc);

As in the 2nd patch - what if this gets called after all transfers have
completed? You're touching the device here, so pm_runtime_get_sync()/put/
autosuspend may be needed.
Best regards,
Krzysztof

> +			residual = desc->bytes_requested -
> +				bytes_transferred % desc->bytes_requested;
> +			dma_set_residue(txstate, residual);
> +			ret = desc->status;
> +			spin_unlock_irqrestore(&pch->lock, flags);
> +			return ret;
> +		}
> +	}
> +	spin_unlock_irqrestore(&pch->lock, flags);
> +
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +	dma_set_residue(txstate, pch->transfered);
> +
> +	return ret;
>  }
>
>  static void pl330_issue_pending(struct dma_chan *chan)
> @@ -2421,10 +2474,13 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
>  		break;
>  	}
>
> +	desc->direction = direction;
>  	desc->rqtype = direction;
>  	desc->rqcfg.brst_size = pch->burst_sz;
>  	desc->rqcfg.brst_len = 1;
> +	desc->bytes_requested = period_len;
>  	fill_px(&desc->px, dst, src, period_len);
> +	pch->transfered = 0;
>
>  	if (!first)
>  		first = desc;
> @@ -2554,9 +2610,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
>  			sg_dma_address(sg), addr, sg_dma_len(sg));
>  	}
>
> +	desc->direction = direction;
>  	desc->rqcfg.brst_size = pch->burst_sz;
>  	desc->rqcfg.brst_len = 1;
>  	desc->rqtype = direction;
> +	desc->bytes_requested = sg_dma_len(sg);
> +	pch->transfered = 0;
>  }
>
>  /* Return the last desc in the chain */
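A minimal sketch of the pm_runtime pattern referred to above, wrapping the
register read so the DMAC is guaranteed to be powered while SA/DA are
sampled. The device pointer path (pch->dmac->ddma.dev) and the use of
autosuspend are assumptions, not part of the posted patch:

#include <linux/pm_runtime.h>

/* Sketch: keep the controller powered for the duration of the register read. */
static int pl330_read_progress(struct dma_pl330_chan *pch,
			       struct dma_pl330_desc *desc)
{
	struct device *dev = pch->dmac->ddma.dev;	/* assumed device pointer */
	int count;

	pm_runtime_get_sync(dev);			/* wake the DMAC if suspended */
	count = pl330_get_current_xferred_count(pch, desc);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);		/* let it suspend again later */

	return count;
}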
Hi Robert,

I tested your patches on exynos5420 peach-pit with audio playback and got
underrun errors. I posted a similar patch at the link below:

https://patchwork.kernel.org/patch/5384551/

Please review it as well.

Thanks
Padma

> On 25.11.2014 13:25, Robert Baldyga wrote:
>> This patch adds the possibility to read the residue of a DMA transfer. It's
>> useful when we want to know how many bytes have been transferred before we
>> terminate the channel. That can happen, for example, on a timeout interrupt.
>>
>> Signed-off-by: Lukasz Czerwinski <l.czerwinski@samsung.com>
>> Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
>> ---
>>  drivers/dma/pl330.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>  1 file changed, 60 insertions(+), 1 deletion(-)
>>
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index d5149aa..c32806d 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -437,6 +437,7 @@ struct dma_pl330_chan {
 	/* For D-to-M and M-to-D channels */
 	int burst_sz; /* the peripheral fifo width */
 	int burst_len; /* the number of burst */
+	int transfered;
 	dma_addr_t fifo_addr;
 
 	/* for cyclic capability */
@@ -500,6 +501,9 @@ struct dma_pl330_desc {
 
 	enum desc_status status;
 
+	int bytes_requested;
+	int direction;
+
 	/* The channel which currently holds this desc */
 	struct dma_pl330_chan *pchan;
 
@@ -2156,11 +2160,60 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	spin_unlock_irqrestore(&pch->lock, flags);
 }
 
+int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+		struct dma_pl330_desc *desc)
+{
+	u32 val, addr;
+	struct pl330_thread *thrd = pch->thread;
+	void __iomem *regs = thrd->dmac->base;
+
+	val = addr = 0;
+	switch (desc->direction) {
+	case DMA_MEM_TO_DEV:
+		val = readl(regs + SA(thrd->id));
+		addr = desc->px.src_addr;
+		break;
+	case DMA_DEV_TO_MEM:
+		val = readl(regs + DA(thrd->id));
+		addr = desc->px.dst_addr;
+		break;
+	default:
+		break;
+	}
+	return val - addr;
+}
+
 static enum dma_status
 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		 struct dma_tx_state *txstate)
 {
-	return dma_cookie_status(chan, cookie, txstate);
+	enum dma_status ret;
+	unsigned long flags;
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned int bytes_transferred;
+	unsigned int residual;
+
+	/* Check in pending list */
+	spin_lock_irqsave(&pch->lock, flags);
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->txd.cookie == cookie) {
+			bytes_transferred =
+				pl330_get_current_xferred_count(pch, desc);
+			residual = desc->bytes_requested -
+				bytes_transferred % desc->bytes_requested;
+			dma_set_residue(txstate, residual);
+			ret = desc->status;
+			spin_unlock_irqrestore(&pch->lock, flags);
+			return ret;
+		}
+	}
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	dma_set_residue(txstate, pch->transfered);
+
+	return ret;
 }
 
 static void pl330_issue_pending(struct dma_chan *chan)
@@ -2421,10 +2474,13 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		break;
 	}
 
+	desc->direction = direction;
 	desc->rqtype = direction;
 	desc->rqcfg.brst_size = pch->burst_sz;
 	desc->rqcfg.brst_len = 1;
+	desc->bytes_requested = period_len;
 	fill_px(&desc->px, dst, src, period_len);
+	pch->transfered = 0;
 
 	if (!first)
 		first = desc;
@@ -2554,9 +2610,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			sg_dma_address(sg), addr, sg_dma_len(sg));
 	}
 
+	desc->direction = direction;
 	desc->rqcfg.brst_size = pch->burst_sz;
 	desc->rqcfg.brst_len = 1;
 	desc->rqtype = direction;
+	desc->bytes_requested = sg_dma_len(sg);
+	pch->transfered = 0;
 }
 
 /* Return the last desc in the chain */
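For context, this is roughly how a client would consume the residue this
series tries to provide: query the transfer state on a timeout, terminate the
channel, and work out how much data actually arrived. A generic dmaengine
usage sketch (the channel, cookie and requested length are assumed to come
from the client's earlier prep/submit calls), not code from this series:

#include <linux/dmaengine.h>

/* Sketch: on timeout, report how many bytes made it through before we stop. */
static size_t stop_and_get_transferred(struct dma_chan *chan,
				       dma_cookie_t cookie, size_t requested)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return requested;		/* everything was transferred */

	dmaengine_terminate_all(chan);		/* stop the remaining transfer */
	return requested - state.residue;	/* bytes done before the stop */
}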