@@ -504,6 +504,9 @@ struct dma_pl330_desc {
enum desc_status status;
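+	/* Number of bytes requested for this descriptor */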
+ int bytes_requested;
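+	/* Last descriptor of a submitted transaction */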
+ bool last;
+
/* The channel which currently holds this desc */
struct dma_pl330_chan *pchan;
@@ -2167,11 +2170,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
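+/*
+ * Work out how many bytes of @desc the hardware has moved so far by
+ * comparing the thread's current source/destination address counter
+ * against the descriptor's programmed start address.
+ */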
+static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+					   struct dma_pl330_desc *desc)
+{
+ struct pl330_thread *thrd = pch->thread;
+ struct pl330_dmac *pl330 = pch->dmac;
+ void __iomem *regs = thrd->dmac->base;
+ u32 val, addr;
+
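+	/* Keep the DMAC powered up while its registers are read */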
+ pm_runtime_get_sync(pl330->ddma.dev);
+ val = addr = 0;
+ if (desc->rqcfg.src_inc) {
+ val = readl(regs + SA(thrd->id));
+ addr = desc->px.src_addr;
+ } else {
+ val = readl(regs + DA(thrd->id));
+ addr = desc->px.dst_addr;
+ }
+ pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
+ return val - addr;
+}
+
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- return dma_cookie_status(chan, cookie, txstate);
+ enum dma_status ret;
+ unsigned long flags;
+ struct dma_pl330_desc *desc, *running = NULL;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned int transferred, residual = 0;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (!txstate)
+ return ret;
+
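+	/* The whole transaction is done: residue stays zero */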
+ if (ret == DMA_COMPLETE)
+ goto out;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
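+	/* Identify the descriptor the DMA thread is currently executing */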
+ if (pch->thread->req_running != -1)
+ running = pch->thread->req[pch->thread->req_running].desc;
+
+	/*
+	 * Walk the work list, accumulating the residue of every
+	 * descriptor up to and including the one carrying @cookie.
+	 */
+ list_for_each_entry(desc, &pch->work_list, node) {
+ if (desc->status == DONE)
+ transferred = desc->bytes_requested;
+ else if (running && desc == running)
+ transferred =
+ pl330_get_current_xferred_count(pch, desc);
+ else
+ transferred = 0;
+ residual += desc->bytes_requested - transferred;
+		if (desc->txd.cookie == cookie) {
+			switch (desc->status) {
+			case DONE:
+				ret = DMA_COMPLETE;
+				break;
+			case PREP:
+			case BUSY:
+				ret = DMA_IN_PROGRESS;
+				break;
+			default:
+				ret = DMA_ERROR;
+			}
+			break;
+		}
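+		/*
+		 * Passed the end of an earlier transaction without
+		 * matching @cookie; its bytes do not count towards
+		 * this cookie's residue.
+		 */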
+ if (desc->last)
+ residual = 0;
+ }
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+out:
+ dma_set_residue(txstate, residual);
+
+ return ret;
}
static void pl330_issue_pending(struct dma_chan *chan)
@@ -2216,12 +2282,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
desc->txd.callback = last->txd.callback;
desc->txd.callback_param = last->txd.callback_param;
}
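+		/* Intermediate descriptors never end a transaction */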
+		desc->last = false;
dma_cookie_assign(&desc->txd);
list_move_tail(&desc->node, &pch->submitted_list);
}
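+	/* Flag the tail so pl330_tx_status() can see the transaction boundary */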
+ last->last = true;
cookie = dma_cookie_assign(&last->txd);
list_add_tail(&last->node, &pch->submitted_list);
spin_unlock_irqrestore(&pch->lock, flags);
@@ -2444,6 +2512,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
desc->rqtype = direction;
desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
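+		/* One period's worth of data per cyclic descriptor */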
+ desc->bytes_requested = period_len;
fill_px(&desc->px, dst, src, period_len);
if (!first)
@@ -2586,6 +2655,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
desc->rqtype = direction;
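+		/* Record the segment length for residue accounting */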
+ desc->bytes_requested = sg_dma_len(sg);
}
/* Return the last desc in the chain */
@@ -2771,7 +2841,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
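+	/* Residue is now computed per segment in pl330_tx_status() */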
- pd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
ret = dma_async_device_register(pd);
if (ret) {