--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	list_move_tail(&vd->node, &vc->desc_submitted);
+	list_add_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
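
Context for the one-word change above: list_move_tail() starts with a list_del(), so it is only safe on a node that is already linked, which held while vchan_tx_prep() parked new descriptors on desc_allocated. With that list gone, vd->node is unlinked when tx_submit runs, and list_add_tail() is the correct call. A minimal userspace sketch (mock list routines, not the kernel's <linux/list.h>) of the difference:

#include <stdio.h>

/* Mock of the kernel's circular doubly-linked list, just enough to
 * show the list_add_tail()/list_move_tail() distinction. */
struct list_head { struct list_head *prev, *next; };

static void init_list(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;	/* dereferences n->prev: n must be linked */
	n->next->prev = n->prev;
}

static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n);			/* unsafe if n is not on any list */
	list_add_tail(n, h);
}

int main(void)
{
	struct list_head submitted, issued, node = { NULL, NULL };

	init_list(&submitted);
	init_list(&issued);

	/* After the revert, vd->node is unlinked when tx_submit runs,
	 * so only list_add_tail() is valid here; list_move_tail()
	 * would dereference the NULL pointers inside node. */
	list_add_tail(&node, &submitted);

	/* Once the node is linked, moving it between lists is fine. */
	list_move_tail(&node, &issued);

	printf("issued non-empty: %d\n", issued.next != &issued);
	return 0;
}
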
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
-		if (async_tx_test_ack(&vd->tx))
-			list_add(&vd->node, &vc->desc_allocated);
-		else
-			vc->desc_free(vd);
+
+		vc->desc_free(vd);
 
 		if (cb)
 			cb(cb_data);
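
With the async_tx ACK test gone, vchan_complete() frees every completed descriptor through vc->desc_free before invoking the saved callback. A hedged sketch of the hook a driver supplies (struct sample_desc and sample_desc_free are hypothetical names):

#include <linux/slab.h>
#include "virt-dma.h"

/* Hypothetical driver descriptor embedding the virt-dma bookkeeping. */
struct sample_desc {
	struct virt_dma_desc vd;
	/* hardware-specific fields would follow */
};

/* Installed as vc->desc_free; now called for every completed descriptor. */
static void sample_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sample_desc, vd));
}
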
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		if (async_tx_test_ack(&vd->tx)) {
-			list_move_tail(&vd->node, &vc->desc_allocated);
-		} else {
-			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-			list_del(&vd->node);
-			vc->desc_free(vd);
-		}
+		list_del(&vd->node);
+		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+		vc->desc_free(vd);
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
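
For reference, the usual feeder of vchan_dma_desc_free_list() is a driver's terminate path. A sketch under the post-revert API (sample_terminate_all is a hypothetical name; the vchan_* helpers and to_virt_chan() are real):

static int sample_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	/* stop the hardware here, then collect everything still queued */
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* frees each entry unconditionally now that the ACK branch is gone */
	vchan_dma_desc_free_list(vc, &head);
	return 0;
}
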
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
-	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
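
Channel setup is otherwise unchanged; a short sketch of the probe-time wiring (sample_* names hypothetical):

/* Hypothetical per-channel setup called from a driver's probe. */
static void sample_chan_setup(struct virt_dma_chan *vc,
			      struct dma_device *dmadev)
{
	vc->desc_free = sample_desc_free;	/* hook sketched above */
	vchan_init(vc, dmadev);		/* sets up the three remaining lists */
}
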
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
 	spinlock_t lock;
 
 	/* protected by vc.lock */
-	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
-	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
-
-	spin_lock_irqsave(&vc->lock, flags);
-	list_add_tail(&vd->node, &vc->desc_allocated);
-	spin_unlock_irqrestore(&vc->lock, flags);
 
 	return &vd->tx;
 }
 
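
With the lock and the desc_allocated bookkeeping removed, vchan_tx_prep() no longer touches shared state, so a prep routine reduces to allocate, fill, wrap. A sketch (sample_prep_memcpy and struct sample_desc are hypothetical; vchan_tx_prep() is the real helper being changed above):

static struct dma_async_tx_descriptor *
sample_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		   size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct sample_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;
	/* program dst, src and len into the hardware fields here */

	/* no lock is taken until the client calls tx_submit() */
	return vchan_tx_prep(vc, &d->vd, flags);
}
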
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- *                             descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
  * vc: virtual channel to get descriptors from
  * head: list of descriptors found
  *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
-	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
-	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
-	list_for_each_entry(vd, &head, node)
-		async_tx_clear_ack(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
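
After this hunk, vchan_free_chan_resources() no longer clears ACK bits before handing everything to vchan_dma_desc_free_list(), and drivers that simply forward their .device_free_chan_resources callback to it need no changes. A sketch of that common wiring (sample_free_chan_resources is a hypothetical name):

/* Hypothetical dmaengine hook; the virt-dma helper does all the work. */
static void sample_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}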