@@ -602,6 +602,7 @@ static int hpb_dmae_probe(struct platform_device *pdev)
dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ hpbdev->shdma_dev.multiplexed = true;
hpbdev->shdma_dev.ops = &hpb_dmae_ops;
hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
@@ -293,6 +293,17 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
return false;
sdev = to_shdma_dev(schan->dma_chan.device);
+
+ /*
+ * FIXME: this is a short-term bug fix.
+ * dma_request_slave_channel_compat() will call
+ * __dma_request_channel() (e.g. in the DT case).
+ * shdma / shdma_chan_filter is not compatible
+ * with non-multiplexed DMACs (e.g. rcar-dmac).
+ */
+ if (!sdev->multiplexed)
+ return false;
+
ret = sdev->ops->set_slave(schan, match, 0, true);
if (ret < 0)
return false;
@@ -754,6 +754,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
/* Default transfer size of 32 bytes requires 32-byte alignment */
dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+ shdev->shdma_dev.multiplexed = true;
shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
err = shdma_init(&pdev->dev, &shdev->shdma_dev,
@@ -110,6 +110,7 @@ struct shdma_dev {
struct shdma_chan **schan;
const struct shdma_ops *ops;
size_t desc_size;
+ bool multiplexed;
};
#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
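For illustration only (not part of the patch): a minimal, hypothetical client-side sketch of the request path the FIXME above refers to. The helper name, the slave_id handling and the "tx" channel name are placeholders. When the lookup inside dma_request_slave_channel_compat() does not yield a channel, it falls back to __dma_request_channel(), which offers channels from every registered DMAC to the filter callback; with the new check, shdma_chan_filter() declines channels of non-multiplexed controllers such as rcar-dmac instead of treating them as shdma channels.

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/* Hypothetical client helper requesting a TX slave channel */
static struct dma_chan *example_request_tx(struct device *dev, int slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * Falls back to __dma_request_channel() + shdma_chan_filter()
	 * when no channel is found for "tx" via DT/platform lookup.
	 */
	return dma_request_slave_channel_compat(mask, shdma_chan_filter,
						(void *)(unsigned long)slave_id,
						dev, "tx");
}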