diff mbox

[v2,2/2] dmaengine: dw: fix cyclic transfer callbacks

Message ID 1452517469-26922-3-git-send-email-mans@mansr.com (mailing list archive)
State Accepted
Headers show

Commit Message

Måns Rullgård Jan. 11, 2016, 1:04 p.m. UTC
Cyclic transfer callbacks rely on block completion interrupts which were
disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
interrupts").  This re-enables block interrupts so the cyclic callbacks
can work.  Other transfer types are not affected as they set the INT_EN
bit only on the last block.

Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
Signed-off-by: Mans Rullgard <mans@mansr.com>
---
Changes:
- new patch
---
 drivers/dma/dw/core.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

Comments

Viresh Kumar Jan. 11, 2016, 2:10 p.m. UTC | #1
On 11-01-16, 13:04, Mans Rullgard wrote:
> Cyclic transfer callbacks rely on block completion interrupts which were
> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
> interrupts").  This re-enables block interrupts so the cyclic callbacks
> can work.  Other transfer types are not affected as they set the INT_EN
> bit only on the last block.
> 
> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
> Signed-off-by: Mans Rullgard <mans@mansr.com>
> ---
> Changes:
> - new patch
> ---
>  drivers/dma/dw/core.c | 21 +++++++++++++++------
>  1 file changed, 15 insertions(+), 6 deletions(-)

Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>

BTW, Shouldn't you mark these for stable trees as well ? :)
Måns Rullgård Jan. 11, 2016, 2:14 p.m. UTC | #2
Viresh Kumar <viresh.kumar@linaro.org> writes:

> On 11-01-16, 13:04, Mans Rullgard wrote:
>> Cyclic transfer callbacks rely on block completion interrupts which were
>> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
>> interrupts").  This re-enables block interrupts so the cyclic callbacks
>> can work.  Other transfer types are not affected as they set the INT_EN
>> bit only on the last block.
>> 
>> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
>> Signed-off-by: Mans Rullgard <mans@mansr.com>
>> ---
>> Changes:
>> - new patch
>> ---
>>  drivers/dma/dw/core.c | 21 +++++++++++++++------
>>  1 file changed, 15 insertions(+), 6 deletions(-)
>
> Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
>
> BTW, Shouldn't you mark these for stable trees as well ? :)

Probably.  Some maintainers prefer to do that themselves, but apparently
you're not one of them.
Andy Shevchenko Jan. 11, 2016, 3:04 p.m. UTC | #3
On Mon, 2016-01-11 at 13:04 +0000, Mans Rullgard wrote:
> Cyclic transfer callbacks rely on block completion interrupts which
> were
> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle
> block
> interrupts").  This re-enables block interrupts so the cyclic
> callbacks
> can work.  Other transfer types are not affected as they set the
> INT_EN
> bit only on the last block.
> 
> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
> interrupts")
> Signed-off-by: Mans Rullgard <mans@mansr.com>

How did you test that?

From my understanding the custom stuff that does cyclic interrupts
prepares a set of descriptors per period, which at the end of transfer
will generate XFER interrupt. Next period will go in the same way.

Maybe I missed something.

> ---
> Changes:
> - new patch
> ---
>  drivers/dma/dw/core.c | 21 +++++++++++++++------
>  1 file changed, 15 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
> index af2b92f8501e..b92662722404 100644
> --- a/drivers/dma/dw/core.c
> +++ b/drivers/dma/dw/core.c
> @@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
>  
>  	/* Enable interrupts */
>  	channel_set_bit(dw, MASK.XFER, dwc->mask);
> +	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
>  	channel_set_bit(dw, MASK.ERROR, dwc->mask);
>  
>  	dwc->initialized = true;
> @@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
>  
>  /* Called with dwc->lock held and all DMAC interrupts disabled */
>  static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
> -		u32 status_err, u32 status_xfer)
> +		u32 status_block, u32 status_err, u32 status_xfer)
>  {
>  	unsigned long flags;
>  
> -	if (dwc->mask) {
> +	if (status_block & dwc->mask) {
>  		void (*callback)(void *param);
>  		void *callback_param;
>  
>  		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
>  				channel_readl(dwc, LLP));
> +		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  
>  		callback = dwc->cdesc->period_callback;
>  		callback_param = dwc->cdesc->period_callback_param;
> @@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
>  		channel_writel(dwc, CTL_LO, 0);
>  		channel_writel(dwc, CTL_HI, 0);
>  
> +		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  		dma_writel(dw, CLEAR.ERROR, dwc->mask);
>  		dma_writel(dw, CLEAR.XFER, dwc->mask);
>  
> @@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
>  {
>  	struct dw_dma *dw = (struct dw_dma *)data;
>  	struct dw_dma_chan *dwc;
> +	u32 status_block;
>  	u32 status_xfer;
>  	u32 status_err;
>  	int i;
>  
> +	status_block = dma_readl(dw, RAW.BLOCK);
>  	status_xfer = dma_readl(dw, RAW.XFER);
>  	status_err = dma_readl(dw, RAW.ERROR);
>  
> @@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
>  	for (i = 0; i < dw->dma.chancnt; i++) {
>  		dwc = &dw->chan[i];
>  		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
> -			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
> +			dwc_handle_cyclic(dw, dwc, status_block, status_err,
> +					status_xfer);
>  		else if (status_err & (1 << i))
>  			dwc_handle_error(dw, dwc);
>  		else if (status_xfer & (1 << i))
> @@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
>  	 * Re-enable interrupts.
>  	 */
>  	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
>  }
>  
> @@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
>  	 * softirq handler.
>  	 */
>  	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
>  
>  	status = dma_readl(dw, STATUS_INT);
> @@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
>  
>  		/* Try to recover */
>  		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
> +		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
> @@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
>  	dma_writel(dw, CFG, 0);
>  
>  	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
> @@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
>  
>  	/* Disable interrupts */
>  	channel_clear_bit(dw, MASK.XFER, dwc->mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
>  	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
>  
>  	spin_unlock_irqrestore(&dwc->lock, flags);
> @@ -1458,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
>  
>  	dwc_chan_disable(dw, dwc);
>  
> +	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  	dma_writel(dw, CLEAR.ERROR, dwc->mask);
>  	dma_writel(dw, CLEAR.XFER, dwc->mask);
>  
> @@ -1546,9 +1558,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
>  	/* Force dma off, just in case */
>  	dw_dma_off(dw);
>  
> -	/* Disable BLOCK interrupts as well */
> -	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
> -
>  	/* Create a pool of consistent memory blocks for hardware descriptors */
>  	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
>  					 sizeof(struct dw_desc), 4, 0);
Måns Rullgård Jan. 11, 2016, 3:09 p.m. UTC | #4
Andy Shevchenko <andriy.shevchenko@linux.intel.com> writes:

> On Mon, 2016-01-11 at 13:04 +0000, Mans Rullgard wrote:
>> Cyclic transfer callbacks rely on block completion interrupts which
>> were
>> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle
>> block
>> interrupts").  This re-enables block interrupts so the cyclic
>> callbacks
>> can work.  Other transfer types are not affected as they set the
>> INT_EN
>> bit only on the last block.
>> 
>> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
>> interrupts")
>> Signed-off-by: Mans Rullgard <mans@mansr.com>
>
> How did you test that?

With the ABDAC sound driver on the AVR32.  It fails rather miserably
without these patches.

> From my understanding the custom stuff that does cyclic interrupts
> prepares a set of descriptors per period, which at the end of transfer
> will generate XFER interrupt. Next period will go in the same way.
>
> Maybe I missed something.

The cyclic DMA is done by setting up a set of descriptors, one per
period, with the last linked back to the first.  The chain never ends,
so there is never an XFER interrupt.
Andy Shevchenko Jan. 11, 2016, 3:17 p.m. UTC | #5
On Mon, Jan 11, 2016 at 5:09 PM, Måns Rullgård <mans@mansr.com> wrote:
> Andy Shevchenko <andriy.shevchenko@linux.intel.com> writes:
>
>> On Mon, 2016-01-11 at 13:04 +0000, Mans Rullgard wrote:
>>> Cyclic transfer callbacks rely on block completion interrupts which
>>> were
>>> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle
>>> block
>>> interrupts").  This re-enables block interrupts so the cyclic
>>> callbacks
>>> can work.  Other transfer types are not affected as they set the
>>> INT_EN
>>> bit only on the last block.
>>>
>>> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
>>> interrupts")
>>> Signed-off-by: Mans Rullgard <mans@mansr.com>
>>
>> How did you test that?
>
> With the ABDAC sound driver on the AVR32.  It fails rather miserably
> without these patches.
>
>> From my understanding the custom stuff that does cyclic interrupts
>> prepares a set of descriptors per period, which at the end of transfer
>> will generate XFER interrupt. Next period will go in the same way.
>>
>> Maybe I missed something.
>
> The cyclic DMA is done by setting up a set of descriptors, one per
> period, with the last linked back to the first.  The chain never ends,
> so there is never an XFER interrupt.

Thanks for clarification.

Nevertheless, my plan is to get rid of custom implementation of the
cyclic API in this driver, thus, I would suggest to have this patch
only for stable and as interim for mainline.
Måns Rullgård Jan. 11, 2016, 3:22 p.m. UTC | #6
Andy Shevchenko <andy.shevchenko@gmail.com> writes:

> On Mon, Jan 11, 2016 at 5:09 PM, Måns Rullgård <mans@mansr.com> wrote:
>> Andy Shevchenko <andriy.shevchenko@linux.intel.com> writes:
>>
>>> On Mon, 2016-01-11 at 13:04 +0000, Mans Rullgard wrote:
>>>> Cyclic transfer callbacks rely on block completion interrupts which
>>>> were
>>>> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle
>>>> block
>>>> interrupts").  This re-enables block interrupts so the cyclic
>>>> callbacks
>>>> can work.  Other transfer types are not affected as they set the
>>>> INT_EN
>>>> bit only on the last block.
>>>>
>>>> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
>>>> interrupts")
>>>> Signed-off-by: Mans Rullgard <mans@mansr.com>
>>>
>>> How did you test that?
>>
>> With the ABDAC sound driver on the AVR32.  It fails rather miserably
>> without these patches.
>>
>>> From my understanding the custom stuff that does cyclic interrupts
>>> prepares a set of descriptors per period, which at the end of transfer
>>> will generate XFER interrupt. Next period will go in the same way.
>>>
>>> Maybe I missed something.
>>
>> The cyclic DMA is done by setting up a set of descriptors, one per
>> period, with the last linked back to the first.  The chain never ends,
>> so there is never an XFER interrupt.
>
> Thanks for clarification.
>
> Nevertheless, my plan is to get rid of custom implementation of the
> cyclic API in this driver,

Good idea.

> thus, I would suggest to have this patch only for stable and as
> interim for mainline.

Better to start from a working state.
Andy Shevchenko Jan. 11, 2016, 3:24 p.m. UTC | #7
On Mon, Jan 11, 2016 at 5:22 PM, Måns Rullgård <mans@mansr.com> wrote:
> Andy Shevchenko <andy.shevchenko@gmail.com> writes:

>> thus, I would suggest to have this patch only for stable and as
>> interim for mainline.
>
> Better to start from a working state.

Agreed.
Viresh Kumar Jan. 11, 2016, 4:08 p.m. UTC | #8
On 11-01-16, 14:14, Måns Rullgård wrote:
> Viresh Kumar <viresh.kumar@linaro.org> writes:
> 
> > On 11-01-16, 13:04, Mans Rullgard wrote:
> >> Cyclic transfer callbacks rely on block completion interrupts which were
> >> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
> >> interrupts").  This re-enables block interrupts so the cyclic callbacks
> >> can work.  Other transfer types are not affected as they set the INT_EN
> >> bit only on the last block.
> >> 
> >> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
> >> Signed-off-by: Mans Rullgard <mans@mansr.com>
> >> ---
> >> Changes:
> >> - new patch
> >> ---
> >>  drivers/dma/dw/core.c | 21 +++++++++++++++------
> >>  1 file changed, 15 insertions(+), 6 deletions(-)
> >
> > Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
> >
> > BTW, Shouldn't you mark these for stable trees as well ? :)
> 
> Probably.  Some maintainers prefer to do that themselves, but apparently
> you're not one of them.

Vinod is the maintainer who is going to apply the patch :)
Vinod Koul Jan. 14, 2016, 5:44 a.m. UTC | #9
On Mon, Jan 11, 2016 at 09:38:27PM +0530, Viresh Kumar wrote:
> On 11-01-16, 14:14, Måns Rullgård wrote:
> > Viresh Kumar <viresh.kumar@linaro.org> writes:
> > 
> > > On 11-01-16, 13:04, Mans Rullgard wrote:
> > >> Cyclic transfer callbacks rely on block completion interrupts which were
> > >> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
> > >> interrupts").  This re-enables block interrupts so the cyclic callbacks
> > >> can work.  Other transfer types are not affected as they set the INT_EN
> > >> bit only on the last block.
> > >> 
> > >> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
> > >> Signed-off-by: Mans Rullgard <mans@mansr.com>
> > >> ---
> > >> Changes:
> > >> - new patch
> > >> ---
> > >>  drivers/dma/dw/core.c | 21 +++++++++++++++------
> > >>  1 file changed, 15 insertions(+), 6 deletions(-)
> > >
> > > Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
> > >
> > > BTW, Shouldn't you mark these for stable trees as well ? :)
> > 
> > Probably.  Some maintainers prefer to do that themselves, but apparently
> > you're not one of them.
> 
> Vinod is the maintainer who is going to apply the patch :)

Letting know is fine too, either commenting after the SOB line or adding CC
line.. Maintainers can add/remove :)
diff mbox

Patch

diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index af2b92f8501e..b92662722404 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,6 +156,7 @@  static void dwc_initialize(struct dw_dma_chan *dwc)
 
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
 	dwc->initialized = true;
@@ -536,16 +537,17 @@  EXPORT_SYMBOL(dw_dma_get_dst_addr);
 
 /* Called with dwc->lock held and all DMAC interrupts disabled */
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_err, u32 status_xfer)
+		u32 status_block, u32 status_err, u32 status_xfer)
 {
 	unsigned long flags;
 
-	if (dwc->mask) {
+	if (status_block & dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
 
 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
 				channel_readl(dwc, LLP));
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
@@ -577,6 +579,7 @@  static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		channel_writel(dwc, CTL_LO, 0);
 		channel_writel(dwc, CTL_HI, 0);
 
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -593,10 +596,12 @@  static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
 	struct dw_dma_chan *dwc;
+	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
 	int i;
 
+	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);
 
@@ -605,7 +610,8 @@  static void dw_dma_tasklet(unsigned long data)
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+			dwc_handle_cyclic(dw, dwc, status_block, status_err,
+					status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
 		else if (status_xfer & (1 << i))
@@ -616,6 +622,7 @@  static void dw_dma_tasklet(unsigned long data)
 	 * Re-enable interrupts.
 	 */
 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -635,6 +642,7 @@  static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 	 * softirq handler.
 	 */
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
 
 	status = dma_readl(dw, STATUS_INT);
@@ -645,6 +653,7 @@  static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 
 		/* Try to recover */
 		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1111,6 +1120,7 @@  static void dw_dma_off(struct dw_dma *dw)
 	dma_writel(dw, CFG, 0);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1216,6 +1226,7 @@  static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
+	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1458,6 +1469,7 @@  void dw_dma_cyclic_free(struct dma_chan *chan)
 
 	dwc_chan_disable(dw, dwc);
 
+	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -1546,9 +1558,6 @@  int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	/* Force dma off, just in case */
 	dw_dma_off(dw);
 
-	/* Disable BLOCK interrupts as well */
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
-
 	/* Create a pool of consistent memory blocks for hardware descriptors */
 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
 					 sizeof(struct dw_desc), 4, 0);