
[1/2] dma: Add Freescale qDMA engine driver support

Message ID: 1441950833-27684-1-git-send-email-yao.yuan@freescale.com (mailing list archive)
State: Changes Requested

Commit Message

yao yuan Sept. 11, 2015, 5:53 a.m. UTC
Add Freescale Queue Direct Memory Access (qDMA) controller support.
This module can be found on LS-1 and LS-2 SoCs.

This adds legacy mode support for qDMA.

Signed-off-by: Yuan Yao <yao.yuan@freescale.com>
---
 Documentation/devicetree/bindings/dma/fsl-qdma.txt |  43 ++
 MAINTAINERS                                        |   7 +
 drivers/dma/Kconfig                                |  10 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/fsl-qdma.c                             | 521 +++++++++++++++++++++
 5 files changed, 582 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
 create mode 100644 drivers/dma/fsl-qdma.c

Comments

Li Yang-R58472 Sept. 23, 2015, 11:45 p.m. UTC | #1
On Fri, Sep 11, 2015 at 12:53 AM, Yuan Yao <yao.yuan@freescale.com> wrote:
> Add Freescale Queue Direct Memory Access (qDMA) controller support.
> This module can be found on LS-1 and LS-2 SoCs.
>
> This adds legacy mode support for qDMA.
>
> Signed-off-by: Yuan Yao <yao.yuan@freescale.com>
> ---
>  Documentation/devicetree/bindings/dma/fsl-qdma.txt |  43 ++
>  MAINTAINERS                                        |   7 +
>  drivers/dma/Kconfig                                |  10 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/fsl-qdma.c                             | 521 +++++++++++++++++++++
>  5 files changed, 582 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
>  create mode 100644 drivers/dma/fsl-qdma.c
>
> diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> new file mode 100644
> index 0000000..cdae71c
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> @@ -0,0 +1,43 @@
> +* Freescale queue Direct Memory Access Controller(qDMA) Controller
> +
> +  The qDMA controller transfers blocks of data between one source and one or more
> +destinations. The blocks of data transferred can be represented in memory as contiguous
> +or non-contiguous using scatter/gather table(s). Channel virtualization is supported
> +through enqueuing of DMA jobs to, or dequeuing DMA jobs from, different work
> +queues.
> +  Legacy mode is primarily included for software requiring the earlier
> +QorIQ DMA programming model. This mode provides a simple programming
> +model not utilizing the datapath architecture. In legacy mode, DMA
> +operations are directly configured through a set of architectural
> +registers per channel.

Is this binding only covering the legacy mode?  The binding should
describe the whole IP block, regardless of whether we have a driver for
all of its features.

> +
> +* qDMA Controller
> +Required properties:
> +- compatible :
> +       - "fsl,ls-qdma" for qDMA used similar to that on LS SoC

The compatible string needs to be specific, like "fsl,ls1021a-qdma".  See
http://www.devicetree.org/Device_Tree_Usage.
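
For illustration, a minimal sketch of how the driver's match table could
carry an SoC-specific string (reusing the "fsl,ls1021a-qdma" example above;
this is not what the posted patch does):

	/* Hypothetical: match a specific SoC instead of the family-wide string. */
	static const struct of_device_id fsl_qdma_dt_ids[] = {
		{ .compatible = "fsl,ls1021a-qdma", },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);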

> +- reg : Specifies base physical address(s) and size of the qDMA registers.
> +       The region is qDMA control register's address and size.
> +- interrupts : A list of interrupt-specifiers, one for each entry in
> +       interrupt-names.
> +- interrupt-names : Should contain:
> +       "qdma-tx" - the  interrupt
> +       "qdma-err" - the error interrupt
> +- channels : Number of channels supported by the controller
> +
> +Optional properties:
> +- big-endian: If present registers and hardware scatter/gather descriptors
> +       of the qDMA are implemented in big endian mode, otherwise in little

Endian
> +       mode.
> +
> +
> +Examples:
> +
> +       qdma: qdma@8390000 {
> +               compatible = "fsl,ls-qdma";
> +               reg = <0x0 0x8380000 0x0 0x20000>;
> +               interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
> +                               <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
> +               interrupt-names = "qdma-tx", "qdma-err";
> +               big-endian;
> +               channels = <1>;
> +       };
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 5772ccf..a4d1b52 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -4357,6 +4357,13 @@ L:       linuxppc-dev@lists.ozlabs.org
>  S:     Maintained
>  F:     drivers/dma/fsldma.*
>
> +FREESCALE qDMA DRIVER
> +M:     Yuan Yao <yao.yuan@freescale.com>
> +L:     linux-arm-kernel@lists.infradead.org

Interestingly, you listed the ARM mailing list instead of the dmaengine mailing list.

> +S:     Maintained
> +F:     Documentation/devicetree/bindings/dma/fsl-qdma.txt
> +F:     drivers/dma/fsl-qdma.c
> +
>  FREESCALE I2C CPM DRIVER
>  M:     Jochen Friedrich <jochen@scram.de>
>  L:     linuxppc-dev@lists.ozlabs.org
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index b458475..e29e985 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -193,6 +193,16 @@ config FSL_EDMA
>           multiplexing capability for DMA request sources(slot).
>           This module can be found on Freescale Vybrid and LS-1 SoCs.
>
> +config FSL_QDMA
> +       tristate "Freescale qDMA engine support"
> +       select DMA_ENGINE
> +       select DMA_VIRTUAL_CHANNELS
> +       help
> +         Support the Freescale qDMA engine with command queue and legacy mode.
> +         Channel virtualization is supported through enqueuing of DMA jobs to,
> +         or dequeuing DMA jobs from, different work queues.
> +         This module can be found on Freescale LS SoCs.

Better to spell out "Layerscape" instead of "LS".

> +
>  config FSL_RAID
>          tristate "Freescale RAID engine Support"
>          depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index 7711a71..8de7526 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -29,6 +29,7 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
>  obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
>  obj-$(CONFIG_FSL_DMA) += fsldma.o
>  obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
> +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
>  obj-$(CONFIG_FSL_RAID) += fsl_raid.o
>  obj-$(CONFIG_HSU_DMA) += hsu/
>  obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
> diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
> new file mode 100644
> index 0000000..846cdba
> --- /dev/null
> +++ b/drivers/dma/fsl-qdma.c
> @@ -0,0 +1,521 @@
> +/*
> + * drivers/dma/fsl-qdma.c
> + *
> + * Copyright 2014-2015 Freescale Semiconductor, Inc.
> + *
> + * Driver for the Freescale qDMA engine with legacy mode.

If this is only for legacy mode, name it fsl-qdma-legacy.c

> + * This module can be found on Freescale LS SoCs.
> + *
> + * This program is free software; you can redistribute  it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published
> + * by the Free Software Foundation.
> + */
> +
> +#include <linux/clk.h>
> +#include <linux/delay.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/dmapool.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/module.h>
> +#include <linux/of.h>
> +#include <linux/of_address.h>
> +#include <linux/of_device.h>
> +#include <linux/of_dma.h>
> +#include <linux/of_irq.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +
> +#include "virt-dma.h"
> +
> +#define FSL_QDMA_DMR           0x0
> +#define FSL_QDMA_DSR_P         0x4
> +
> +#define FSL_QDMA_DSR_M         0x10004
> +#define FSL_QDMA_DLMR          0x10100
> +#define FSL_QDMA_DLSR          0x10104
> +#define FSL_QDMA_DLSATR                0x10110
> +#define FSL_QDMA_DLSAR         0x10114
> +#define FSL_QDMA_DLDATR                0x10118
> +#define FSL_QDMA_DLDAR         0x1011c
> +#define FSL_QDMA_DLBCR         0x10120
> +#define FSL_QDMA_DLESAD                0x10148
> +#define FSL_QDMA_DLEDAD                0x1014c
> +
> +#define FSL_QDMA_DLMR_CS       0x1
> +#define FSL_QDMA_DLMR_EOSIE    0x200
> +#define FSL_QDMA_DLMR_EIE      0x40
> +#define FSL_QDMA_DLSR_TE       0x80
> +#define FSL_QDMA_DLSR_CH       0x20
> +#define FSL_QDMA_DLSR_PE       0x10
> +#define FSL_QDMA_DLSR_CB       0x4
> +#define FSL_QDMA_DLSR_EOSI     0x2
> +
> +#define FSL_QDMA_SRTTYPE_R_N   0x40000
> +
> +struct fsl_qdma_tcd {
> +       u64     saddr;
> +       u32     nbytes;
> +       u64     daddr;
> +};
> +
> +struct fsl_qdma_chan_config {
> +       enum dma_transfer_direction     dir;
> +       enum dma_slave_buswidth         addr_width;
> +       u32                             burst;
> +       u32                             attr;
> +};
> +
> +struct fsl_qdma_desc {
> +       struct virt_dma_desc            vdesc;
> +       struct fsl_qdma_chan            *qchan;
> +       struct fsl_qdma_tcd             tcd;
> +};
> +
> +struct fsl_qdma_chan {
> +       struct virt_dma_chan            vchan;
> +       struct fsl_qdma_desc            *desc;
> +       enum dma_status                 status;
> +       u32                             slave_id;
> +       struct fsl_qdma_engine          *qdma;
> +};
> +
> +struct fsl_qdma_engine {
> +       struct dma_device       dma_dev;
> +       void __iomem            *membase;
> +       u32                     n_chans;
> +       struct mutex            fsl_qdma_mutex;
> +       int                     controller_irq;
> +       int                     err_irq;
> +       bool                    big_endian;
> +       struct fsl_qdma_chan    chans[];
> +
> +};
> +
> +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
> +{
> +       if (qdma->big_endian)
> +               return ioread32be(addr);
> +       else
> +               return ioread32(addr);
> +}
> +
> +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
> +                                               void __iomem *addr)
> +{
> +       if (qdma->big_endian)
> +               iowrite32be(val, addr);
> +       else
> +               iowrite32(val, addr);
> +}
> +
> +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
> +{
> +       return container_of(chan, struct fsl_qdma_chan, vchan.chan);
> +}
> +
> +static struct fsl_qdma_desc *to_fsl_qdma_desc(struct virt_dma_desc *vd)
> +{
> +       return container_of(vd, struct fsl_qdma_desc, vdesc);
> +}
> +
> +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
> +{
> +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +
> +       fsl_chan->desc = NULL;
> +       return 0;
> +}
> +
> +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
> +{
> +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +       unsigned long flags;
> +       LIST_HEAD(head);
> +
> +       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
> +       vchan_get_all_descriptors(&fsl_chan->vchan, &head);
> +       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
> +
> +       vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
> +}
> +
> +static void fsl_qdma_set_tcd_params(struct fsl_qdma_chan *fsl_chan,
> +                                       u64 src, u64 dst, u32 nbytes)
> +{
> +       void __iomem *addr = fsl_chan->qdma->membase;
> +       u32 reg;
> +
> +       /*
> +        * Source address.
> +        * Represents address bits 31-0 of a 49-bit source address.
> +        */
> +       qdma_writel(fsl_chan->qdma, (u32)src, addr + FSL_QDMA_DLSAR);
> +       /*
> +        * Source address.
> +        * Represents address bits 47-32 of a 49-bit source address.
> +        */
> +       reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLSATR);
> +       reg |= (u16)(src >> 32) & 0xffff;
> +       reg |= FSL_QDMA_SRTTYPE_R_N;
> +       qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLSATR);
> +       /*
> +        * Source address.
> +        * Represents address bits 48 of a 49-bit source address.
> +        */
> +       reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLESAD);
> +       reg |= (src >> 48) & 0x1;
> +       qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLESAD);
> +
> +       /*
> +        * Destination address.
> +        * Represents address bits 31-0 of a 49-bit destination address.
> +        */
> +       qdma_writel(fsl_chan->qdma, (u32)dst, addr + FSL_QDMA_DLDAR);
> +       /*
> +        * Destination address.
> +        * Represents address bits 47-32 of a 49-bit destination address.
> +        */
> +       reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLDATR);
> +       reg |= (u16)(dst >> 32) & 0xffff;
> +       reg |= FSL_QDMA_SRTTYPE_R_N;
> +       qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLDATR);
> +       /*
> +        * Destination address.
> +        * Represents address bits 48 of a 49-bit destination address.
> +        */
> +       reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLEDAD);
> +       reg |= (dst >> 48) & 0x1;
> +       qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLEDAD);
> +
> +       /*
> +        * Byte count.
> +        * Contains the number of bytes to transfer.
> +        */
> +       qdma_writel(fsl_chan->qdma, nbytes, addr + FSL_QDMA_DLBCR);
> +}
> +
> +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
> +{
> +       u32 reg;
> +
> +       reg = qdma_readl(fsl_qdma, fsl_qdma->membase + FSL_QDMA_DLMR);
> +       reg |= FSL_QDMA_DLMR_EOSIE;
> +       reg |= FSL_QDMA_DLMR_EIE;
> +       qdma_writel(fsl_qdma, reg, fsl_qdma->membase + FSL_QDMA_DLMR);
> +       return 0;
> +}
> +
> +static void fsl_qdma_enable_request(struct fsl_qdma_chan *fsl_chan)
> +{
> +       void __iomem *addr = fsl_chan->qdma->membase;
> +       u32 reg;
> +
> +       reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLMR);
> +
> +       reg &= ~FSL_QDMA_DLMR_CS;
> +       qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLMR);
> +
> +       reg |= FSL_QDMA_DLMR_CS;
> +       qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLMR);
> +}
> +
> +static struct fsl_qdma_desc *fsl_qdma_alloc_desc(struct fsl_qdma_chan *fsl_chan)
> +{
> +       struct fsl_qdma_desc *fsl_desc;
> +
> +       fsl_desc = kzalloc(sizeof(*fsl_desc), GFP_NOWAIT);
> +
> +       if (!fsl_desc)
> +               return NULL;
> +
> +       fsl_desc->qchan = fsl_chan;
> +
> +       return fsl_desc;
> +}
> +
> +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
> +{
> +       struct fsl_qdma_desc *fsl_desc;
> +
> +       fsl_desc = to_fsl_qdma_desc(vdesc);
> +       kfree(fsl_desc);
> +}
> +
> +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
> +{
> +       struct fsl_qdma_tcd *tcd;
> +       struct virt_dma_desc *vdesc;
> +
> +       vdesc = vchan_next_desc(&fsl_chan->vchan);
> +       if (!vdesc)
> +               return;
> +
> +       fsl_chan->desc = to_fsl_qdma_desc(vdesc);
> +       tcd = &fsl_chan->desc->tcd;
> +       fsl_qdma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->nbytes);
> +       fsl_qdma_enable_request(fsl_chan);
> +       fsl_chan->status = DMA_IN_PROGRESS;
> +}
> +
> +static void fsl_qdma_issue_pending(struct dma_chan *chan)
> +{
> +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
> +
> +       if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->desc)
> +               fsl_qdma_enqueue_desc(fsl_chan);
> +
> +       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
> +}
> +
> +static struct dma_async_tx_descriptor *
> +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
> +               dma_addr_t src, size_t len, unsigned long flags)
> +{
> +       struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +       struct fsl_qdma_desc *fsl_desc;
> +       struct fsl_qdma_tcd *tcd;
> +
> +       fsl_desc = fsl_qdma_alloc_desc(fsl_chan);
> +       if (!fsl_desc)
> +               return NULL;
> +
> +       tcd = &fsl_desc->tcd;
> +       tcd->saddr = (u64)src;
> +       tcd->nbytes = (u32)len;
> +       tcd->daddr = (u64)dst;
> +
> +       return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
> +}
> +
> +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
> +               dma_cookie_t cookie, struct dma_tx_state *txstate)
> +{
> +       return dma_cookie_status(chan, cookie, txstate);
> +}
> +
> +static irqreturn_t fsl_qdma_controller_handler(int irq, void *dev_id)
> +{
> +       struct fsl_qdma_engine *fsl_qdma = dev_id;
> +       struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[0];
> +       void __iomem *addr = fsl_qdma->membase;
> +       u32 reg;
> +
> +       reg = qdma_readl(fsl_qdma, addr + FSL_QDMA_DLSR);
> +       if (!(reg & FSL_QDMA_DLSR_EOSI))
> +               return IRQ_NONE;
> +
> +       /* Don't clean TE and PE bit if they are set. */
> +       reg &= ~FSL_QDMA_DLSR_TE & ~FSL_QDMA_DLSR_PE;
> +       qdma_writel(fsl_qdma, reg, addr + FSL_QDMA_DLSR);
> +
> +       spin_lock(&fsl_chan->vchan.lock);
> +       list_del(&fsl_chan->desc->vdesc.node);
> +       vchan_cookie_complete(&fsl_chan->desc->vdesc);
> +       fsl_chan->desc = NULL;
> +       fsl_chan->status = DMA_COMPLETE;
> +       fsl_qdma_enqueue_desc(fsl_chan);
> +       spin_unlock(&fsl_chan->vchan.lock);
> +
> +       return IRQ_HANDLED;
> +}
> +
> +static irqreturn_t fsl_qdma_controller_handler_err(int irq, void *dev_id)
> +{
> +       struct fsl_qdma_engine *fsl_qdma = dev_id;
> +       u32 reg;
> +
> +       reg = qdma_readl(fsl_qdma, fsl_qdma->membase + FSL_QDMA_DLSR);
> +
> +       if (reg & FSL_QDMA_DLSR_TE) {
> +               dev_err(fsl_qdma->dma_dev.dev,
> +                       "Transfer error. Check your address please!\n");
> +       }
> +
> +       if (reg & FSL_QDMA_DLSR_PE) {
> +               dev_err(fsl_qdma->dma_dev.dev,
> +                       "Programming error. Check your setting please!\n");
> +       }
> +
> +       /* Don't clean EOSI bit if it's set. */
> +       reg &= ~FSL_QDMA_DLSR_EOSI;
> +       qdma_writel(fsl_qdma, reg, fsl_qdma->membase + FSL_QDMA_DLSR);
> +
> +       return IRQ_HANDLED;
> +}
> +
> +static irqreturn_t fsl_qdma_irq_handler(int irq, void *dev_id)
> +{
> +       if (fsl_qdma_controller_handler(irq, dev_id) == IRQ_HANDLED)
> +               return IRQ_HANDLED;
> +
> +       return fsl_qdma_controller_handler_err(irq, dev_id);
> +}
> +
> +static int fsl_qdma_irq_init(struct platform_device *pdev,
> +                                       struct fsl_qdma_engine *fsl_qdma)
> +{
> +       int ret;
> +
> +       fsl_qdma->controller_irq = platform_get_irq_byname(pdev,
> +                                                       "qdma-tx");
> +       if (fsl_qdma->controller_irq < 0) {
> +               dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
> +               return fsl_qdma->controller_irq;
> +       }
> +
> +       fsl_qdma->err_irq = platform_get_irq_byname(pdev,
> +                                                       "qdma-err");
> +       if (fsl_qdma->err_irq < 0) {
> +               dev_err(&pdev->dev, "Can't get qdma err irq.\n");
> +               return fsl_qdma->err_irq;
> +       }
> +
> +       if (fsl_qdma->controller_irq == fsl_qdma->err_irq) {
> +               ret = devm_request_irq(&pdev->dev, fsl_qdma->controller_irq,
> +                                       fsl_qdma_irq_handler, 0,
> +                                       "qDMA controller", fsl_qdma);
> +
> +               if (ret) {
> +                       dev_err(&pdev->dev, "Can't register qDMA IRQ.\n");
> +                       return  ret;
> +               }
> +       } else {
> +               ret = devm_request_irq(&pdev->dev, fsl_qdma->controller_irq,
> +                               fsl_qdma_controller_handler, 0,
> +                               "qDMA controller", fsl_qdma);
> +               if (ret) {
> +                       dev_err(&pdev->dev,
> +                               "Can't register qDMA controller IRQ.\n");
> +                       return  ret;
> +               }
> +
> +               ret = devm_request_irq(&pdev->dev, fsl_qdma->err_irq,
> +                               fsl_qdma_controller_handler_err, 0,
> +                               "qDMA err", fsl_qdma);
> +               if (ret) {
> +                       dev_err(&pdev->dev, "Can't register qDMA err IRQ.\n");
> +                       return  ret;
> +               }
> +       }
> +
> +       return 0;
> +}
> +
> +static int fsl_qdma_probe(struct platform_device *pdev)
> +{
> +       struct device_node *np = pdev->dev.of_node;
> +       struct fsl_qdma_engine *fsl_qdma;
> +       struct fsl_qdma_chan *fsl_chan;
> +       struct resource *res;
> +       unsigned int len, chans;
> +       int ret, i;
> +
> +       ret = of_property_read_u32(np, "channels", &chans);
> +       if (ret) {
> +               dev_err(&pdev->dev, "Can't get channels.\n");
> +               return ret;
> +       }
> +
> +       len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
> +       fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> +       if (!fsl_qdma)
> +               return -ENOMEM;
> +
> +       fsl_qdma->n_chans = chans;
> +       mutex_init(&fsl_qdma->fsl_qdma_mutex);
> +
> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +       fsl_qdma->membase = devm_ioremap_resource(&pdev->dev, res);
> +       if (IS_ERR(fsl_qdma->membase))
> +               return PTR_ERR(fsl_qdma->membase);
> +
> +       ret = fsl_qdma_irq_init(pdev, fsl_qdma);
> +       if (ret)
> +               return ret;
> +
> +       fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
> +       INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
> +       for (i = 0; i < fsl_qdma->n_chans; i++) {
> +               struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
> +
> +               fsl_chan->qdma = fsl_qdma;
> +               fsl_chan->desc = NULL;
> +               fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
> +               vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
> +       }
> +
> +       dma_cap_set(DMA_PRIVATE, fsl_qdma->dma_dev.cap_mask);
> +       dma_cap_set(DMA_SLAVE, fsl_qdma->dma_dev.cap_mask);
> +       dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
> +
> +       fsl_qdma->dma_dev.dev = &pdev->dev;
> +       fsl_qdma->dma_dev.device_alloc_chan_resources
> +               = fsl_qdma_alloc_chan_resources;
> +       fsl_qdma->dma_dev.device_free_chan_resources
> +               = fsl_qdma_free_chan_resources;
> +       fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
> +       fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
> +       fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
> +
> +       platform_set_drvdata(pdev, fsl_qdma);
> +
> +       ret = dma_async_device_register(&fsl_qdma->dma_dev);
> +       if (ret) {
> +               dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
> +               return ret;
> +       }
> +
> +       ret = fsl_qdma_reg_init(fsl_qdma);
> +       if (ret) {
> +               dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
> +               return ret;
> +       }
> +
> +       return 0;
> +}
> +
> +static int fsl_qdma_remove(struct platform_device *pdev)
> +{
> +       struct device_node *np = pdev->dev.of_node;
> +       struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
> +
> +       of_dma_controller_free(np);
> +       dma_async_device_unregister(&fsl_qdma->dma_dev);
> +       return 0;
> +}
> +
> +static const struct of_device_id fsl_qdma_dt_ids[] = {
> +       { .compatible = "fsl,ls-qdma", },
> +       { /* sentinel */ }
> +};
> +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
> +
> +static struct platform_driver fsl_qdma_driver = {
> +       .driver         = {
> +               .name   = "fsl-qdma",
> +               .owner  = THIS_MODULE,
> +               .of_match_table = fsl_qdma_dt_ids,
> +       },
> +       .probe          = fsl_qdma_probe,
> +       .remove         = fsl_qdma_remove,
> +};
> +
> +static int __init fsl_qdma_init(void)
> +{
> +       return platform_driver_register(&fsl_qdma_driver);
> +}
> +subsys_initcall(fsl_qdma_init);
> +
> +static void __exit fsl_qdma_exit(void)
> +{
> +       platform_driver_unregister(&fsl_qdma_driver);
> +}
> +module_exit(fsl_qdma_exit);
> +
> +MODULE_ALIAS("platform:fsl-qdma");
> +MODULE_DESCRIPTION("Freescale qDMA engine driver");
> +MODULE_LICENSE("GPL v2");
> --
> 2.1.0.27.g96db324
>
Vinod Koul Oct. 5, 2015, 2:37 p.m. UTC | #2
On Fri, Sep 11, 2015 at 01:53:52PM +0800, Yuan Yao wrote:

> +Examples:
> +
> +	qdma: qdma@8390000 {
> +		compatible = "fsl,ls-qdma";
> +		reg = <0x0 0x8380000 0x0 0x20000>;
> +		interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
> +				<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
> +		interrupt-names = "qdma-tx", "qdma-err";
> +		big-endian;
> +		channels = <1>;
> +	};

Binding should be a separate patch

> +FREESCALE qDMA DRIVER
> +M:     Yuan Yao <yao.yuan@freescale.com>
> +L:     linux-arm-kernel@lists.infradead.org

not dmaengine ML ?


> +config FSL_QDMA
> +	tristate "Freescale qDMA engine support"
> +	select DMA_ENGINE
> +	select DMA_VIRTUAL_CHANNELS

There is no arch "depends on" here; can it work on x86?
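
A sketch of one way to express that, assuming the block only exists on
ARM/ARM64 Layerscape parts (COMPILE_TEST keeps build coverage elsewhere):

	config FSL_QDMA
		tristate "Freescale qDMA engine support"
		# Assumption: the qDMA block only appears on ARM/ARM64 Layerscape SoCs.
		depends on ARM || ARM64 || COMPILE_TEST
		select DMA_ENGINE
		select DMA_VIRTUAL_CHANNELS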

> +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> +
> +	fsl_chan->desc = NULL;
> +	return 0;
> +}

Why do you need this? It seems to do nothing.

> +static struct fsl_qdma_desc *fsl_qdma_alloc_desc(struct fsl_qdma_chan *fsl_chan)
> +{
> +	struct fsl_qdma_desc *fsl_desc;
> +
> +	fsl_desc = kzalloc(sizeof(*fsl_desc), GFP_NOWAIT);
> +

empty line here is not required

> +	if (!fsl_desc)
> +		return NULL;
> +
> +	fsl_desc->qchan = fsl_chan;
> +
> +	return fsl_desc;

why not return fsl_desc->qchan ;


> +	dma_cap_set(DMA_PRIVATE, fsl_qdma->dma_dev.cap_mask);
> +	dma_cap_set(DMA_SLAVE, fsl_qdma->dma_dev.cap_mask);
> +	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
> +
> +	fsl_qdma->dma_dev.dev = &pdev->dev;
> +	fsl_qdma->dma_dev.device_alloc_chan_resources
> +		= fsl_qdma_alloc_chan_resources;
> +	fsl_qdma->dma_dev.device_free_chan_resources
> +		= fsl_qdma_free_chan_resources;
> +	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
> +	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
> +	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;

You claim DMA_SLAVE but no prep_ for that?
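
If slave transfers are not meant to be supported, a minimal sketch of the
fix is to stop advertising the capability rather than wiring up a
device_prep_slave_sg callback:

	/* Sketch: only memcpy is implemented, so do not claim DMA_SLAVE. */
	dma_cap_set(DMA_PRIVATE, fsl_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);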

> +
> +static int __init fsl_qdma_init(void)
> +{
> +	return platform_driver_register(&fsl_qdma_driver);
> +}
> +subsys_initcall(fsl_qdma_init);
why subsys_init?
yao yuan Oct. 22, 2015, 7:56 a.m. UTC | #3
Hi Vinod,

Thanks for your review, please see my comments inline.

Best Regards,
Yuan Yao

> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: Monday, October 05, 2015 10:37 PM
> To: Yuan Yao-B46683 <yao.yuan@freescale.com>
> Cc: shawn.guo@linaro.org; dan.j.williams@intel.com;
> dmaengine@vger.kernel.org; linux-kernel@vger.kernel.org; linux-arm-
> kernel@lists.infradead.org; devicetree@vger.kernel.org
> Subject: Re: [PATCH 1/2] dma: Add Freescale qDMA engine driver support
> 
> On Fri, Sep 11, 2015 at 01:53:52PM +0800, Yuan Yao wrote:
> 
> > +Examples:
> > +
> > +	qdma: qdma@8390000 {
> > +		compatible = "fsl,ls-qdma";
> > +		reg = <0x0 0x8380000 0x0 0x20000>;
> > +		interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
> > +				<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
> > +		interrupt-names = "qdma-tx", "qdma-err";
> > +		big-endian;
> > +		channels = <1>;
> > +	};
> 
> Binding should be a separate patch
[Yuan Yao] 
Ok, Thanks.

> 
> > +FREESCALE qDMA DRIVER
> > +M:     Yuan Yao <yao.yuan@freescale.com>
> > +L:     linux-arm-kernel@lists.infradead.org
> 
> not dmaengine ML ?
[Yuan Yao] Ok, Thanks.

> 
> 
> > +config FSL_QDMA
> > +	tristate "Freescale qDMA engine support"
> > +	select DMA_ENGINE
> > +	select DMA_VIRTUAL_CHANNELS
> 
> There is no arch "depends on" here; can it work on x86?
[Yuan Yao] Ok, Thanks.

> 
> > +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan) {
> > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> > +
> > +	fsl_chan->desc = NULL;
> > +	return 0;
> > +}
> 
> Why do you need this? It seems to do nothing.
[Yuan Yao] I will remove it.

> 
> > +static struct fsl_qdma_desc *fsl_qdma_alloc_desc(struct fsl_qdma_chan
> > +*fsl_chan) {
> > +	struct fsl_qdma_desc *fsl_desc;
> > +
> > +	fsl_desc = kzalloc(sizeof(*fsl_desc), GFP_NOWAIT);
> > +
> 
> empty line here is not required
> 
> > +	if (!fsl_desc)
> > +		return NULL;
> > +
> > +	fsl_desc->qchan = fsl_chan;
> > +
> > +	return fsl_desc;
> 
> why not return fsl_desc->qchan ;
> 
[Yuan Yao] 
I still need the other fields in fsl_desc (the TCD and vdesc), so I have to return fsl_desc here.

> 
> > +	dma_cap_set(DMA_PRIVATE, fsl_qdma->dma_dev.cap_mask);
> > +	dma_cap_set(DMA_SLAVE, fsl_qdma->dma_dev.cap_mask);
> > +	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
> > +
> > +	fsl_qdma->dma_dev.dev = &pdev->dev;
> > +	fsl_qdma->dma_dev.device_alloc_chan_resources
> > +		= fsl_qdma_alloc_chan_resources;
> > +	fsl_qdma->dma_dev.device_free_chan_resources
> > +		= fsl_qdma_free_chan_resources;
> > +	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
> > +	fsl_qdma->dma_dev.device_prep_dma_memcpy =
> fsl_qdma_prep_memcpy;
> > +	fsl_qdma->dma_dev.device_issue_pending =
> fsl_qdma_issue_pending;
> 
> You claim DMA_SLAVE but no prep_ for that?
> 
[Yuan Yao] It's a mistake. I will remove it.

> > +
> > +static int __init fsl_qdma_init(void) {
> > +	return platform_driver_register(&fsl_qdma_driver);
> > +}
> > +subsys_initcall(fsl_qdma_init);
> why subsys_init?
> 
[Yuan Yao] As a precaution: some drivers may depend on the DMA engine, so qDMA has to initialize earlier than they do.
Even though no kernel driver depends on qDMA right now, I still think subsys_initcall is better.
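
For reference, when no early init ordering is needed, the usual pattern is
module_platform_driver(), which replaces the explicit init/exit pair in the
patch (a sketch only; the posted driver deliberately keeps subsys_initcall()):

	/* Registers/unregisters the driver at normal module init/exit time. */
	module_platform_driver(fsl_qdma_driver);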

> --
> ~Vinod

Patch

diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
new file mode 100644
index 0000000..cdae71c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
@@ -0,0 +1,43 @@ 
+* Freescale queue Direct Memory Access Controller(qDMA) Controller
+
+  The qDMA controller transfers blocks of data between one source and one or more
+destinations. The blocks of data transferred can be represented in memory as contiguous
+or non-contiguous using scatter/gather table(s). Channel virtualization is supported
+through enqueuing of DMA jobs to, or dequeuing DMA jobs from, different work
+queues.
+  Legacy mode is primarily included for software requiring the earlier
+QorIQ DMA programming model. This mode provides a simple programming
+model not utilizing the datapath architecture. In legacy mode, DMA
+operations are directly configured through a set of architectural
+registers per channel.
+
+* qDMA Controller
+Required properties:
+- compatible :
+	- "fsl,ls-qdma" for qDMA used similar to that on LS SoC
+- reg : Specifies base physical address(s) and size of the qDMA registers.
+	The region is qDMA control register's address and size.
+- interrupts : A list of interrupt-specifiers, one for each entry in
+	interrupt-names.
+- interrupt-names : Should contain:
+	"qdma-tx" - the  interrupt
+	"qdma-err" - the error interrupt
+- channels : Number of channels supported by the controller
+
+Optional properties:
+- big-endian: If present registers and hardware scatter/gather descriptors
+	of the qDMA are implemented in big endian mode, otherwise in little
+	mode.
+
+
+Examples:
+
+	qdma: qdma@8390000 {
+		compatible = "fsl,ls-qdma";
+		reg = <0x0 0x8380000 0x0 0x20000>;
+		interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "qdma-tx", "qdma-err";
+		big-endian;
+		channels = <1>;
+	};
diff --git a/MAINTAINERS b/MAINTAINERS
index 5772ccf..a4d1b52 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4357,6 +4357,13 @@  L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/dma/fsldma.*
 
+FREESCALE qDMA DRIVER
+M:     Yuan Yao <yao.yuan@freescale.com>
+L:     linux-arm-kernel@lists.infradead.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/dma/fsl-qdma.txt
+F:     drivers/dma/fsl-qdma.c
+
 FREESCALE I2C CPM DRIVER
 M:	Jochen Friedrich <jochen@scram.de>
 L:	linuxppc-dev@lists.ozlabs.org
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b458475..e29e985 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -193,6 +193,16 @@  config FSL_EDMA
 	  multiplexing capability for DMA request sources(slot).
 	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
+config FSL_QDMA
+	tristate "Freescale qDMA engine support"
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Freescale qDMA engine with command queue and legacy mode.
+	  Channel virtualization is supported through enqueuing of DMA jobs to,
+	  or dequeuing DMA jobs from, different work queues.
+	  This module can be found on Freescale LS SoCs.
+
 config FSL_RAID
         tristate "Freescale RAID engine Support"
         depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7711a71..8de7526 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -29,6 +29,7 @@  obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
new file mode 100644
index 0000000..846cdba
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,521 @@ 
+/*
+ * drivers/dma/fsl-qdma.c
+ *
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * Driver for the Freescale qDMA engine with legacy mode.
+ * This module can be found on Freescale LS SoCs.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+#define FSL_QDMA_DMR		0x0
+#define FSL_QDMA_DSR_P		0x4
+
+#define FSL_QDMA_DSR_M		0x10004
+#define FSL_QDMA_DLMR		0x10100
+#define FSL_QDMA_DLSR		0x10104
+#define FSL_QDMA_DLSATR		0x10110
+#define FSL_QDMA_DLSAR		0x10114
+#define FSL_QDMA_DLDATR		0x10118
+#define FSL_QDMA_DLDAR		0x1011c
+#define FSL_QDMA_DLBCR		0x10120
+#define FSL_QDMA_DLESAD		0x10148
+#define FSL_QDMA_DLEDAD		0x1014c
+
+#define FSL_QDMA_DLMR_CS	0x1
+#define FSL_QDMA_DLMR_EOSIE	0x200
+#define FSL_QDMA_DLMR_EIE	0x40
+#define FSL_QDMA_DLSR_TE	0x80
+#define FSL_QDMA_DLSR_CH	0x20
+#define FSL_QDMA_DLSR_PE	0x10
+#define FSL_QDMA_DLSR_CB	0x4
+#define FSL_QDMA_DLSR_EOSI	0x2
+
+#define FSL_QDMA_SRTTYPE_R_N	0x40000
+
+struct fsl_qdma_tcd {
+	u64	saddr;
+	u32	nbytes;
+	u64	daddr;
+};
+
+struct fsl_qdma_chan_config {
+	enum dma_transfer_direction	dir;
+	enum dma_slave_buswidth		addr_width;
+	u32				burst;
+	u32				attr;
+};
+
+struct fsl_qdma_desc {
+	struct virt_dma_desc		vdesc;
+	struct fsl_qdma_chan		*qchan;
+	struct fsl_qdma_tcd		tcd;
+};
+
+struct fsl_qdma_chan {
+	struct virt_dma_chan		vchan;
+	struct fsl_qdma_desc		*desc;
+	enum dma_status			status;
+	u32				slave_id;
+	struct fsl_qdma_engine		*qdma;
+};
+
+struct fsl_qdma_engine {
+	struct dma_device	dma_dev;
+	void __iomem		*membase;
+	u32			n_chans;
+	struct mutex            fsl_qdma_mutex;
+	int			controller_irq;
+	int			err_irq;
+	bool			big_endian;
+	struct fsl_qdma_chan	chans[];
+
+};
+
+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
+{
+	if (qdma->big_endian)
+		return ioread32be(addr);
+	else
+		return ioread32(addr);
+}
+
+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
+						void __iomem *addr)
+{
+	if (qdma->big_endian)
+		iowrite32be(val, addr);
+	else
+		iowrite32(val, addr);
+}
+
+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
+}
+
+static struct fsl_qdma_desc *to_fsl_qdma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct fsl_qdma_desc, vdesc);
+}
+
+static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+
+	fsl_chan->desc = NULL;
+	return 0;
+}
+
+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+}
+
+static void fsl_qdma_set_tcd_params(struct fsl_qdma_chan *fsl_chan,
+					u64 src, u64 dst, u32 nbytes)
+{
+	void __iomem *addr = fsl_chan->qdma->membase;
+	u32 reg;
+
+	/*
+	 * Source address.
+	 * Represents address bits 31-0 of a 49-bit source address.
+	 */
+	qdma_writel(fsl_chan->qdma, (u32)src, addr + FSL_QDMA_DLSAR);
+	/*
+	 * Source address.
+	 * Represents address bits 47-32 of a 49-bit source address.
+	 */
+	reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLSATR);
+	reg |= (u16)(src >> 32) & 0xffff;
+	reg |= FSL_QDMA_SRTTYPE_R_N;
+	qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLSATR);
+	/*
+	 * Source address.
+	 * Represents address bits 48 of a 49-bit source address.
+	 */
+	reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLESAD);
+	reg |= (src >> 48) & 0x1;
+	qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLESAD);
+
+	/*
+	 * Destination address.
+	 * Represents address bits 31-0 of a 49-bit destination address.
+	 */
+	qdma_writel(fsl_chan->qdma, (u32)dst, addr + FSL_QDMA_DLDAR);
+	/*
+	 * Destination address.
+	 * Represents address bits 47-32 of a 49-bit destination address.
+	 */
+	reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLDATR);
+	reg |= (u16)(dst >> 32) & 0xffff;
+	reg |= FSL_QDMA_SRTTYPE_R_N;
+	qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLDATR);
+	/*
+	 * Destination address.
+	 * Represents address bits 48 of a 49-bit destination address.
+	 */
+	reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLEDAD);
+	reg |= (dst >> 48) & 0x1;
+	qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLEDAD);
+
+	/*
+	 * Byte count.
+	 * Contains the number of bytes to transfer.
+	 */
+	qdma_writel(fsl_chan->qdma, nbytes, addr + FSL_QDMA_DLBCR);
+}
+
+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+	u32 reg;
+
+	reg = qdma_readl(fsl_qdma, fsl_qdma->membase + FSL_QDMA_DLMR);
+	reg |= FSL_QDMA_DLMR_EOSIE;
+	reg |= FSL_QDMA_DLMR_EIE;
+	qdma_writel(fsl_qdma, reg, fsl_qdma->membase + FSL_QDMA_DLMR);
+	return 0;
+}
+
+static void fsl_qdma_enable_request(struct fsl_qdma_chan *fsl_chan)
+{
+	void __iomem *addr = fsl_chan->qdma->membase;
+	u32 reg;
+
+	reg = qdma_readl(fsl_chan->qdma, addr + FSL_QDMA_DLMR);
+
+	reg &= ~FSL_QDMA_DLMR_CS;
+	qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLMR);
+
+	reg |= FSL_QDMA_DLMR_CS;
+	qdma_writel(fsl_chan->qdma, reg, addr + FSL_QDMA_DLMR);
+}
+
+static struct fsl_qdma_desc *fsl_qdma_alloc_desc(struct fsl_qdma_chan *fsl_chan)
+{
+	struct fsl_qdma_desc *fsl_desc;
+
+	fsl_desc = kzalloc(sizeof(*fsl_desc), GFP_NOWAIT);
+
+	if (!fsl_desc)
+		return NULL;
+
+	fsl_desc->qchan = fsl_chan;
+
+	return fsl_desc;
+}
+
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct fsl_qdma_desc *fsl_desc;
+
+	fsl_desc = to_fsl_qdma_desc(vdesc);
+	kfree(fsl_desc);
+}
+
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+	struct fsl_qdma_tcd *tcd;
+	struct virt_dma_desc *vdesc;
+
+	vdesc = vchan_next_desc(&fsl_chan->vchan);
+	if (!vdesc)
+		return;
+
+	fsl_chan->desc = to_fsl_qdma_desc(vdesc);
+	tcd = &fsl_chan->desc->tcd;
+	fsl_qdma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->nbytes);
+	fsl_qdma_enable_request(fsl_chan);
+	fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->desc)
+		fsl_qdma_enqueue_desc(fsl_chan);
+
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *
+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+		dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_desc *fsl_desc;
+	struct fsl_qdma_tcd *tcd;
+
+	fsl_desc = fsl_qdma_alloc_desc(fsl_chan);
+	if (!fsl_desc)
+		return NULL;
+
+	tcd = &fsl_desc->tcd;
+	tcd->saddr = (u64)src;
+	tcd->nbytes = (u32)len;
+	tcd->daddr = (u64)dst;
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+static irqreturn_t fsl_qdma_controller_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[0];
+	void __iomem *addr = fsl_qdma->membase;
+	u32 reg;
+
+	reg = qdma_readl(fsl_qdma, addr + FSL_QDMA_DLSR);
+	if (!(reg & FSL_QDMA_DLSR_EOSI))
+		return IRQ_NONE;
+
+	/* Don't clean TE and PE bit if they are set. */
+	reg &= ~FSL_QDMA_DLSR_TE & ~FSL_QDMA_DLSR_PE;
+	qdma_writel(fsl_qdma, reg, addr + FSL_QDMA_DLSR);
+
+	spin_lock(&fsl_chan->vchan.lock);
+	list_del(&fsl_chan->desc->vdesc.node);
+	vchan_cookie_complete(&fsl_chan->desc->vdesc);
+	fsl_chan->desc = NULL;
+	fsl_chan->status = DMA_COMPLETE;
+	fsl_qdma_enqueue_desc(fsl_chan);
+	spin_unlock(&fsl_chan->vchan.lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_qdma_controller_handler_err(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	u32 reg;
+
+	reg = qdma_readl(fsl_qdma, fsl_qdma->membase + FSL_QDMA_DLSR);
+
+	if (reg & FSL_QDMA_DLSR_TE) {
+		dev_err(fsl_qdma->dma_dev.dev,
+			"Transfer error. Check your address please!\n");
+	}
+
+	if (reg & FSL_QDMA_DLSR_PE) {
+		dev_err(fsl_qdma->dma_dev.dev,
+			"Programming error. Check your setting please!\n");
+	}
+
+	/* Don't clean EOSI bit if it's set. */
+	reg &= ~FSL_QDMA_DLSR_EOSI;
+	qdma_writel(fsl_qdma, reg, fsl_qdma->membase + FSL_QDMA_DLSR);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_qdma_irq_handler(int irq, void *dev_id)
+{
+	if (fsl_qdma_controller_handler(irq, dev_id) == IRQ_HANDLED)
+		return IRQ_HANDLED;
+
+	return fsl_qdma_controller_handler_err(irq, dev_id);
+}
+
+static int fsl_qdma_irq_init(struct platform_device *pdev,
+					struct fsl_qdma_engine *fsl_qdma)
+{
+	int ret;
+
+	fsl_qdma->controller_irq = platform_get_irq_byname(pdev,
+							"qdma-tx");
+	if (fsl_qdma->controller_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+		return fsl_qdma->controller_irq;
+	}
+
+	fsl_qdma->err_irq = platform_get_irq_byname(pdev,
+							"qdma-err");
+	if (fsl_qdma->err_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma err irq.\n");
+		return fsl_qdma->err_irq;
+	}
+
+	if (fsl_qdma->controller_irq == fsl_qdma->err_irq) {
+		ret = devm_request_irq(&pdev->dev, fsl_qdma->controller_irq,
+					fsl_qdma_irq_handler, 0,
+					"qDMA controller", fsl_qdma);
+
+		if (ret) {
+			dev_err(&pdev->dev, "Can't register qDMA IRQ.\n");
+			return  ret;
+		}
+	} else {
+		ret = devm_request_irq(&pdev->dev, fsl_qdma->controller_irq,
+				fsl_qdma_controller_handler, 0,
+				"qDMA controller", fsl_qdma);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Can't register qDMA controller IRQ.\n");
+			return  ret;
+		}
+
+		ret = devm_request_irq(&pdev->dev, fsl_qdma->err_irq,
+				fsl_qdma_controller_handler_err, 0,
+				"qDMA err", fsl_qdma);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't register qDMA err IRQ.\n");
+			return  ret;
+		}
+	}
+
+	return 0;
+}
+
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma;
+	struct fsl_qdma_chan *fsl_chan;
+	struct resource *res;
+	unsigned int len, chans;
+	int ret, i;
+
+	ret = of_property_read_u32(np, "channels", &chans);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get channels.\n");
+		return ret;
+	}
+
+	len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
+	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!fsl_qdma)
+		return -ENOMEM;
+
+	fsl_qdma->n_chans = chans;
+	mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_qdma->membase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->membase))
+		return PTR_ERR(fsl_qdma->membase);
+
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
+	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+	for (i = 0; i < fsl_qdma->n_chans; i++) {
+		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+		fsl_chan->qdma = fsl_qdma;
+		fsl_chan->desc = NULL;
+		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+	}
+
+	dma_cap_set(DMA_PRIVATE, fsl_qdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_SLAVE, fsl_qdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+
+	fsl_qdma->dma_dev.dev = &pdev->dev;
+	fsl_qdma->dma_dev.device_alloc_chan_resources
+		= fsl_qdma_alloc_chan_resources;
+	fsl_qdma->dma_dev.device_free_chan_resources
+		= fsl_qdma_free_chan_resources;
+	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
+	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+
+	platform_set_drvdata(pdev, fsl_qdma);
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
+		return ret;
+	}
+
+	ret = fsl_qdma_reg_init(fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(np);
+	dma_async_device_unregister(&fsl_qdma->dma_dev);
+	return 0;
+}
+
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+	{ .compatible = "fsl,ls-qdma", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+	.driver		= {
+		.name	= "fsl-qdma",
+		.owner  = THIS_MODULE,
+		.of_match_table = fsl_qdma_dt_ids,
+	},
+	.probe          = fsl_qdma_probe,
+	.remove		= fsl_qdma_remove,
+};
+
+static int __init fsl_qdma_init(void)
+{
+	return platform_driver_register(&fsl_qdma_driver);
+}
+subsys_initcall(fsl_qdma_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+	platform_driver_unregister(&fsl_qdma_driver);
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_DESCRIPTION("Freescale qDMA engine driver");
+MODULE_LICENSE("GPL v2");