diff mbox

ARM:SAMSUNG: Move S3C DMA driver to drivers/dma

Message ID 1307432901-22781-1-git-send-email-alim.akhtar@samsung.com (mailing list archive)
State New, archived
Headers show

Commit Message

Alim Akhtar June 7, 2011, 7:48 a.m. UTC
Signed-off-by: alim.akhtar <alim.akhtar@samsung.com>
---
 arch/arm/configs/exynos4_defconfig |    1 +
 arch/arm/configs/s5p64x0_defconfig |    1 +
 arch/arm/configs/s5pc100_defconfig |    1 +
 arch/arm/configs/s5pv210_defconfig |    1 +
 arch/arm/plat-samsung/Kconfig      |    6 -
 arch/arm/plat-samsung/Makefile     |    2 -
 arch/arm/plat-samsung/s3c-pl330.c  | 1244 ------------------------------------
 drivers/dma/Kconfig                |    8 +
 drivers/dma/Makefile               |    2 +
 drivers/dma/s3c-pl330.c            | 1244 ++++++++++++++++++++++++++++++++++++
 10 files changed, 1258 insertions(+), 1252 deletions(-)
 delete mode 100644 arch/arm/plat-samsung/s3c-pl330.c
 create mode 100644 drivers/dma/s3c-pl330.c

Comments

Kyungmin Park June 7, 2011, 8 a.m. UTC | #1
Hi,

As I know, there is a PL330 DMA implementation by
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");

Wouldn't it be better to use the generic PL330 instead of the Samsung-specific
PL330 implementation?

As I remember, Jassi has a plan to use the generic one?

Thank you,
Kyungmin Park

On Tue, Jun 7, 2011 at 4:48 PM, root <alim.akhtar@samsung.com> wrote:
> Signed-off-by: alim.akhtar <alim.akhtar@samsung.com>
> ---
>  arch/arm/configs/exynos4_defconfig |    1 +
>  arch/arm/configs/s5p64x0_defconfig |    1 +
>  arch/arm/configs/s5pc100_defconfig |    1 +
>  arch/arm/configs/s5pv210_defconfig |    1 +
>  arch/arm/plat-samsung/Kconfig      |    6 -
>  arch/arm/plat-samsung/Makefile     |    2 -
>  arch/arm/plat-samsung/s3c-pl330.c  | 1244 ------------------------------------
>  drivers/dma/Kconfig                |    8 +
>  drivers/dma/Makefile               |    2 +
>  drivers/dma/s3c-pl330.c            | 1244 ++++++++++++++++++++++++++++++++++++
>  10 files changed, 1258 insertions(+), 1252 deletions(-)
>  delete mode 100644 arch/arm/plat-samsung/s3c-pl330.c
>  create mode 100644 drivers/dma/s3c-pl330.c
>
> diff --git a/arch/arm/configs/exynos4_defconfig b/arch/arm/configs/exynos4_defconfig
> index da53ff3..6421074 100644
> --- a/arch/arm/configs/exynos4_defconfig
> +++ b/arch/arm/configs/exynos4_defconfig
> @@ -37,6 +37,7 @@ CONFIG_SERIAL_SAMSUNG=y
>  CONFIG_SERIAL_SAMSUNG_CONSOLE=y
>  CONFIG_HW_RANDOM=y
>  CONFIG_I2C=y
> +CONFIG_DMADEVICES=y
>  # CONFIG_HWMON is not set
>  # CONFIG_MFD_SUPPORT is not set
>  # CONFIG_HID_SUPPORT is not set
> diff --git a/arch/arm/configs/s5p64x0_defconfig b/arch/arm/configs/s5p64x0_defconfig
> index ad6b61b..9340ffc 100644
> --- a/arch/arm/configs/s5p64x0_defconfig
> +++ b/arch/arm/configs/s5p64x0_defconfig
> @@ -31,6 +31,7 @@ CONFIG_SERIAL_8250_NR_UARTS=3
>  CONFIG_SERIAL_SAMSUNG=y
>  CONFIG_SERIAL_SAMSUNG_CONSOLE=y
>  CONFIG_HW_RANDOM=y
> +CONFIG_DMADEVICES=y
>  # CONFIG_HWMON is not set
>  CONFIG_DISPLAY_SUPPORT=y
>  # CONFIG_VGA_CONSOLE is not set
> diff --git a/arch/arm/configs/s5pc100_defconfig b/arch/arm/configs/s5pc100_defconfig
> index 41bafc9..694ef97 100644
> --- a/arch/arm/configs/s5pc100_defconfig
> +++ b/arch/arm/configs/s5pc100_defconfig
> @@ -20,6 +20,7 @@ CONFIG_SERIAL_SAMSUNG_CONSOLE=y
>  CONFIG_HW_RANDOM=y
>  CONFIG_I2C=y
>  CONFIG_I2C_CHARDEV=y
> +CONFIG_DMADEVICES=y
>  # CONFIG_VGA_CONSOLE is not set
>  CONFIG_MMC=y
>  CONFIG_MMC_DEBUG=y
> diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
> index fa98990..0013593 100644
> --- a/arch/arm/configs/s5pv210_defconfig
> +++ b/arch/arm/configs/s5pv210_defconfig
> @@ -37,6 +37,7 @@ CONFIG_SERIAL_8250=y
>  CONFIG_SERIAL_SAMSUNG=y
>  CONFIG_SERIAL_SAMSUNG_CONSOLE=y
>  CONFIG_HW_RANDOM=y
> +CONFIG_DMADEVICES=y
>  # CONFIG_HWMON is not set
>  # CONFIG_VGA_CONSOLE is not set
>  # CONFIG_HID_SUPPORT is not set
> diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
> index 4d79519..9607ac4 100644
> --- a/arch/arm/plat-samsung/Kconfig
> +++ b/arch/arm/plat-samsung/Kconfig
> @@ -294,12 +294,6 @@ config S3C_DMA
>        help
>          Internal configuration for S3C DMA core
>
> -config S3C_PL330_DMA
> -       bool
> -       select PL330
> -       help
> -         S3C DMA API Driver for PL330 DMAC.
> -
>  comment "Power management"
>
>  config SAMSUNG_PM_DEBUG
> diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
> index 53eb15b..895c697 100644
> --- a/arch/arm/plat-samsung/Makefile
> +++ b/arch/arm/plat-samsung/Makefile
> @@ -64,8 +64,6 @@ obj-$(CONFIG_SAMSUNG_DEV_PWM) += dev-pwm.o
>
>  obj-$(CONFIG_S3C_DMA)          += dma.o
>
> -obj-$(CONFIG_S3C_PL330_DMA)    += s3c-pl330.o
> -
>  # PM support
>
>  obj-$(CONFIG_PM)               += pm.o
> diff --git a/arch/arm/plat-samsung/s3c-pl330.c b/arch/arm/plat-samsung/s3c-pl330.c
> deleted file mode 100644
> index f85638c..0000000
> --- a/arch/arm/plat-samsung/s3c-pl330.c
> +++ /dev/null
> @@ -1,1244 +0,0 @@
> -/* linux/arch/arm/plat-samsung/s3c-pl330.c
> - *
> - * Copyright (C) 2010 Samsung Electronics Co. Ltd.
> - *     Jaswinder Singh <jassi.brar@samsung.com>
> - *
> - * This program is free software; you can redistribute it and/or modify
> - * it under the terms of the GNU General Public License as published by
> - * the Free Software Foundation; either version 2 of the License, or
> - * (at your option) any later version.
> - */
> -
> -#include <linux/init.h>
> -#include <linux/module.h>
> -#include <linux/interrupt.h>
> -#include <linux/io.h>
> -#include <linux/slab.h>
> -#include <linux/platform_device.h>
> -#include <linux/clk.h>
> -#include <linux/err.h>
> -
> -#include <asm/hardware/pl330.h>
> -
> -#include <plat/s3c-pl330-pdata.h>
> -
> -/**
> - * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
> - * @busy_chan: Number of channels currently busy.
> - * @peri: List of IDs of peripherals this DMAC can work with.
> - * @node: To attach to the global list of DMACs.
> - * @pi: PL330 configuration info for the DMAC.
> - * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
> - * @clk: Pointer of DMAC operation clock.
> - */
> -struct s3c_pl330_dmac {
> -       unsigned                busy_chan;
> -       enum dma_ch             *peri;
> -       struct list_head        node;
> -       struct pl330_info       *pi;
> -       struct kmem_cache       *kmcache;
> -       struct clk              *clk;
> -};
> -
> -/**
> - * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
> - * @token: Xfer ID provided by the client.
> - * @node: To attach to the list of xfers on a channel.
> - * @px: Xfer for PL330 core.
> - * @chan: Owner channel of this xfer.
> - */
> -struct s3c_pl330_xfer {
> -       void                    *token;
> -       struct list_head        node;
> -       struct pl330_xfer       px;
> -       struct s3c_pl330_chan   *chan;
> -};
> -
> -/**
> - * struct s3c_pl330_chan - Logical channel to communicate with
> - *     a Physical peripheral.
> - * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
> - *     NULL if the channel is available to be acquired.
> - * @id: ID of the peripheral that this channel can communicate with.
> - * @options: Options specified by the client.
> - * @sdaddr: Address provided via s3c2410_dma_devconfig.
> - * @node: To attach to the global list of channels.
> - * @lrq: Pointer to the last submitted pl330_req to PL330 core.
> - * @xfer_list: To manage list of xfers enqueued.
> - * @req: Two requests to communicate with the PL330 engine.
> - * @callback_fn: Callback function to the client.
> - * @rqcfg: Channel configuration for the xfers.
> - * @xfer_head: Pointer to the xfer to be next executed.
> - * @dmac: Pointer to the DMAC that manages this channel, NULL if the
> - *     channel is available to be acquired.
> - * @client: Client of this channel. NULL if the
> - *     channel is available to be acquired.
> - */
> -struct s3c_pl330_chan {
> -       void                            *pl330_chan_id;
> -       enum dma_ch                     id;
> -       unsigned int                    options;
> -       unsigned long                   sdaddr;
> -       struct list_head                node;
> -       struct pl330_req                *lrq;
> -       struct list_head                xfer_list;
> -       struct pl330_req                req[2];
> -       s3c2410_dma_cbfn_t              callback_fn;
> -       struct pl330_reqcfg             rqcfg;
> -       struct s3c_pl330_xfer           *xfer_head;
> -       struct s3c_pl330_dmac           *dmac;
> -       struct s3c2410_dma_client       *client;
> -};
> -
> -/* All DMACs in the platform */
> -static LIST_HEAD(dmac_list);
> -
> -/* All channels to peripherals in the platform */
> -static LIST_HEAD(chan_list);
> -
> -/*
> - * Since we add resources(DMACs and Channels) to the global pool,
> - * we need to guard access to the resources using a global lock
> - */
> -static DEFINE_SPINLOCK(res_lock);
> -
> -/* Returns the channel with ID 'id' in the chan_list */
> -static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
> -{
> -       struct s3c_pl330_chan *ch;
> -
> -       list_for_each_entry(ch, &chan_list, node)
> -               if (ch->id == id)
> -                       return ch;
> -
> -       return NULL;
> -}
> -
> -/* Allocate a new channel with ID 'id' and add to chan_list */
> -static void chan_add(const enum dma_ch id)
> -{
> -       struct s3c_pl330_chan *ch = id_to_chan(id);
> -
> -       /* Return if the channel already exists */
> -       if (ch)
> -               return;
> -
> -       ch = kmalloc(sizeof(*ch), GFP_KERNEL);
> -       /* Return silently to work with other channels */
> -       if (!ch)
> -               return;
> -
> -       ch->id = id;
> -       ch->dmac = NULL;
> -
> -       list_add_tail(&ch->node, &chan_list);
> -}
> -
> -/* If the channel is not yet acquired by any client */
> -static bool chan_free(struct s3c_pl330_chan *ch)
> -{
> -       if (!ch)
> -               return false;
> -
> -       /* Channel points to some DMAC only when it's acquired */
> -       return ch->dmac ? false : true;
> -}
> -
> -/*
> - * Returns 0 is peripheral i/f is invalid or not present on the dmac.
> - * Index + 1, otherwise.
> - */
> -static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
> -{
> -       enum dma_ch *id = dmac->peri;
> -       int i;
> -
> -       /* Discount invalid markers */
> -       if (ch_id == DMACH_MAX)
> -               return 0;
> -
> -       for (i = 0; i < PL330_MAX_PERI; i++)
> -               if (id[i] == ch_id)
> -                       return i + 1;
> -
> -       return 0;
> -}
> -
> -/* If all channel threads of the DMAC are busy */
> -static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
> -{
> -       struct pl330_info *pi = dmac->pi;
> -
> -       return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
> -}
> -
> -/*
> - * Returns the number of free channels that
> - * can be handled by this dmac only.
> - */
> -static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
> -{
> -       enum dma_ch *id = dmac->peri;
> -       struct s3c_pl330_dmac *d;
> -       struct s3c_pl330_chan *ch;
> -       unsigned found, count = 0;
> -       enum dma_ch p;
> -       int i;
> -
> -       for (i = 0; i < PL330_MAX_PERI; i++) {
> -               p = id[i];
> -               ch = id_to_chan(p);
> -
> -               if (p == DMACH_MAX || !chan_free(ch))
> -                       continue;
> -
> -               found = 0;
> -               list_for_each_entry(d, &dmac_list, node) {
> -                       if (d != dmac && iface_of_dmac(d, ch->id)) {
> -                               found = 1;
> -                               break;
> -                       }
> -               }
> -               if (!found)
> -                       count++;
> -       }
> -
> -       return count;
> -}
> -
> -/*
> - * Measure of suitability of 'dmac' handling 'ch'
> - *
> - * 0 indicates 'dmac' can not handle 'ch' either
> - * because it is not supported by the hardware or
> - * because all dmac channels are currently busy.
> - *
> - * >0 vlaue indicates 'dmac' has the capability.
> - * The bigger the value the more suitable the dmac.
> - */
> -#define MAX_SUIT       UINT_MAX
> -#define MIN_SUIT       0
> -
> -static unsigned suitablility(struct s3c_pl330_dmac *dmac,
> -               struct s3c_pl330_chan *ch)
> -{
> -       struct pl330_info *pi = dmac->pi;
> -       enum dma_ch *id = dmac->peri;
> -       struct s3c_pl330_dmac *d;
> -       unsigned s;
> -       int i;
> -
> -       s = MIN_SUIT;
> -       /* If all the DMAC channel threads are busy */
> -       if (dmac_busy(dmac))
> -               return s;
> -
> -       for (i = 0; i < PL330_MAX_PERI; i++)
> -               if (id[i] == ch->id)
> -                       break;
> -
> -       /* If the 'dmac' can't talk to 'ch' */
> -       if (i == PL330_MAX_PERI)
> -               return s;
> -
> -       s = MAX_SUIT;
> -       list_for_each_entry(d, &dmac_list, node) {
> -               /*
> -                * If some other dmac can talk to this
> -                * peri and has some channel free.
> -                */
> -               if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
> -                       s = 0;
> -                       break;
> -               }
> -       }
> -       if (s)
> -               return s;
> -
> -       s = 100;
> -
> -       /* Good if free chans are more, bad otherwise */
> -       s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
> -
> -       return s;
> -}
> -
> -/* More than one DMAC may have capability to transfer data with the
> - * peripheral. This function assigns most suitable DMAC to manage the
> - * channel and hence communicate with the peripheral.
> - */
> -static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
> -{
> -       struct s3c_pl330_dmac *d, *dmac = NULL;
> -       unsigned sn, sl = MIN_SUIT;
> -
> -       list_for_each_entry(d, &dmac_list, node) {
> -               sn = suitablility(d, ch);
> -
> -               if (sn == MAX_SUIT)
> -                       return d;
> -
> -               if (sn > sl)
> -                       dmac = d;
> -       }
> -
> -       return dmac;
> -}
> -
> -/* Acquire the channel for peripheral 'id' */
> -static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
> -{
> -       struct s3c_pl330_chan *ch = id_to_chan(id);
> -       struct s3c_pl330_dmac *dmac;
> -
> -       /* If the channel doesn't exist or is already acquired */
> -       if (!ch || !chan_free(ch)) {
> -               ch = NULL;
> -               goto acq_exit;
> -       }
> -
> -       dmac = map_chan_to_dmac(ch);
> -       /* If couldn't map */
> -       if (!dmac) {
> -               ch = NULL;
> -               goto acq_exit;
> -       }
> -
> -       dmac->busy_chan++;
> -       ch->dmac = dmac;
> -
> -acq_exit:
> -       return ch;
> -}
> -
> -/* Delete xfer from the queue */
> -static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
> -{
> -       struct s3c_pl330_xfer *t;
> -       struct s3c_pl330_chan *ch;
> -       int found;
> -
> -       if (!xfer)
> -               return;
> -
> -       ch = xfer->chan;
> -
> -       /* Make sure xfer is in the queue */
> -       found = 0;
> -       list_for_each_entry(t, &ch->xfer_list, node)
> -               if (t == xfer) {
> -                       found = 1;
> -                       break;
> -               }
> -
> -       if (!found)
> -               return;
> -
> -       /* If xfer is last entry in the queue */
> -       if (xfer->node.next == &ch->xfer_list)
> -               t = list_entry(ch->xfer_list.next,
> -                               struct s3c_pl330_xfer, node);
> -       else
> -               t = list_entry(xfer->node.next,
> -                               struct s3c_pl330_xfer, node);
> -
> -       /* If there was only one node left */
> -       if (t == xfer)
> -               ch->xfer_head = NULL;
> -       else if (ch->xfer_head == xfer)
> -               ch->xfer_head = t;
> -
> -       list_del(&xfer->node);
> -}
> -
> -/* Provides pointer to the next xfer in the queue.
> - * If CIRCULAR option is set, the list is left intact,
> - * otherwise the xfer is removed from the list.
> - * Forced delete 'pluck' can be set to override the CIRCULAR option.
> - */
> -static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
> -               int pluck)
> -{
> -       struct s3c_pl330_xfer *xfer = ch->xfer_head;
> -
> -       if (!xfer)
> -               return NULL;
> -
> -       /* If xfer is last entry in the queue */
> -       if (xfer->node.next == &ch->xfer_list)
> -               ch->xfer_head = list_entry(ch->xfer_list.next,
> -                                       struct s3c_pl330_xfer, node);
> -       else
> -               ch->xfer_head = list_entry(xfer->node.next,
> -                                       struct s3c_pl330_xfer, node);
> -
> -       if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
> -               del_from_queue(xfer);
> -
> -       return xfer;
> -}
> -
> -static inline void add_to_queue(struct s3c_pl330_chan *ch,
> -               struct s3c_pl330_xfer *xfer, int front)
> -{
> -       struct pl330_xfer *xt;
> -
> -       /* If queue empty */
> -       if (ch->xfer_head == NULL)
> -               ch->xfer_head = xfer;
> -
> -       xt = &ch->xfer_head->px;
> -       /* If the head already submitted (CIRCULAR head) */
> -       if (ch->options & S3C2410_DMAF_CIRCULAR &&
> -               (xt == ch->req[0].x || xt == ch->req[1].x))
> -               ch->xfer_head = xfer;
> -
> -       /* If this is a resubmission, it should go at the head */
> -       if (front) {
> -               ch->xfer_head = xfer;
> -               list_add(&xfer->node, &ch->xfer_list);
> -       } else {
> -               list_add_tail(&xfer->node, &ch->xfer_list);
> -       }
> -}
> -
> -static inline void _finish_off(struct s3c_pl330_xfer *xfer,
> -               enum s3c2410_dma_buffresult res, int ffree)
> -{
> -       struct s3c_pl330_chan *ch;
> -
> -       if (!xfer)
> -               return;
> -
> -       ch = xfer->chan;
> -
> -       /* Do callback */
> -       if (ch->callback_fn)
> -               ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
> -
> -       /* Force Free or if buffer is not needed anymore */
> -       if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
> -               kmem_cache_free(ch->dmac->kmcache, xfer);
> -}
> -
> -static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
> -               struct pl330_req *r)
> -{
> -       struct s3c_pl330_xfer *xfer;
> -       int ret = 0;
> -
> -       /* If already submitted */
> -       if (r->x)
> -               return 0;
> -
> -       xfer = get_from_queue(ch, 0);
> -       if (xfer) {
> -               r->x = &xfer->px;
> -
> -               /* Use max bandwidth for M<->M xfers */
> -               if (r->rqtype == MEMTOMEM) {
> -                       struct pl330_info *pi = xfer->chan->dmac->pi;
> -                       int burst = 1 << ch->rqcfg.brst_size;
> -                       u32 bytes = r->x->bytes;
> -                       int bl;
> -
> -                       bl = pi->pcfg.data_bus_width / 8;
> -                       bl *= pi->pcfg.data_buf_dep;
> -                       bl /= burst;
> -
> -                       /* src/dst_burst_len can't be more than 16 */
> -                       if (bl > 16)
> -                               bl = 16;
> -
> -                       while (bl > 1) {
> -                               if (!(bytes % (bl * burst)))
> -                                       break;
> -                               bl--;
> -                       }
> -
> -                       ch->rqcfg.brst_len = bl;
> -               } else {
> -                       ch->rqcfg.brst_len = 1;
> -               }
> -
> -               ret = pl330_submit_req(ch->pl330_chan_id, r);
> -
> -               /* If submission was successful */
> -               if (!ret) {
> -                       ch->lrq = r; /* latest submitted req */
> -                       return 0;
> -               }
> -
> -               r->x = NULL;
> -
> -               /* If both of the PL330 ping-pong buffers filled */
> -               if (ret == -EAGAIN) {
> -                       dev_err(ch->dmac->pi->dev, "%s:%d!\n",
> -                               __func__, __LINE__);
> -                       /* Queue back again */
> -                       add_to_queue(ch, xfer, 1);
> -                       ret = 0;
> -               } else {
> -                       dev_err(ch->dmac->pi->dev, "%s:%d!\n",
> -                               __func__, __LINE__);
> -                       _finish_off(xfer, S3C2410_RES_ERR, 0);
> -               }
> -       }
> -
> -       return ret;
> -}
> -
> -static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
> -       struct pl330_req *r, enum pl330_op_err err)
> -{
> -       unsigned long flags;
> -       struct s3c_pl330_xfer *xfer;
> -       struct pl330_xfer *xl = r->x;
> -       enum s3c2410_dma_buffresult res;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       r->x = NULL;
> -
> -       s3c_pl330_submit(ch, r);
> -
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       /* Map result to S3C DMA API */
> -       if (err == PL330_ERR_NONE)
> -               res = S3C2410_RES_OK;
> -       else if (err == PL330_ERR_ABORT)
> -               res = S3C2410_RES_ABORT;
> -       else
> -               res = S3C2410_RES_ERR;
> -
> -       /* If last request had some xfer */
> -       if (xl) {
> -               xfer = container_of(xl, struct s3c_pl330_xfer, px);
> -               _finish_off(xfer, res, 0);
> -       } else {
> -               dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
> -                       __func__, __LINE__);
> -       }
> -}
> -
> -static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
> -{
> -       struct pl330_req *r = token;
> -       struct s3c_pl330_chan *ch = container_of(r,
> -                                       struct s3c_pl330_chan, req[0]);
> -       s3c_pl330_rq(ch, r, err);
> -}
> -
> -static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
> -{
> -       struct pl330_req *r = token;
> -       struct s3c_pl330_chan *ch = container_of(r,
> -                                       struct s3c_pl330_chan, req[1]);
> -       s3c_pl330_rq(ch, r, err);
> -}
> -
> -/* Release an acquired channel */
> -static void chan_release(struct s3c_pl330_chan *ch)
> -{
> -       struct s3c_pl330_dmac *dmac;
> -
> -       if (chan_free(ch))
> -               return;
> -
> -       dmac = ch->dmac;
> -       ch->dmac = NULL;
> -       dmac->busy_chan--;
> -}
> -
> -int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
> -{
> -       struct s3c_pl330_xfer *xfer;
> -       enum pl330_chan_op pl330op;
> -       struct s3c_pl330_chan *ch;
> -       unsigned long flags;
> -       int idx, ret;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = id_to_chan(id);
> -
> -       if (!ch || chan_free(ch)) {
> -               ret = -EINVAL;
> -               goto ctrl_exit;
> -       }
> -
> -       switch (op) {
> -       case S3C2410_DMAOP_START:
> -               /* Make sure both reqs are enqueued */
> -               idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> -               s3c_pl330_submit(ch, &ch->req[idx]);
> -               s3c_pl330_submit(ch, &ch->req[1 - idx]);
> -               pl330op = PL330_OP_START;
> -               break;
> -
> -       case S3C2410_DMAOP_STOP:
> -               pl330op = PL330_OP_ABORT;
> -               break;
> -
> -       case S3C2410_DMAOP_FLUSH:
> -               pl330op = PL330_OP_FLUSH;
> -               break;
> -
> -       case S3C2410_DMAOP_PAUSE:
> -       case S3C2410_DMAOP_RESUME:
> -       case S3C2410_DMAOP_TIMEOUT:
> -       case S3C2410_DMAOP_STARTED:
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               return 0;
> -
> -       default:
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               return -EINVAL;
> -       }
> -
> -       ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
> -
> -       if (pl330op == PL330_OP_START) {
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               return ret;
> -       }
> -
> -       idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> -
> -       /* Abort the current xfer */
> -       if (ch->req[idx].x) {
> -               xfer = container_of(ch->req[idx].x,
> -                               struct s3c_pl330_xfer, px);
> -
> -               /* Drop xfer during FLUSH */
> -               if (pl330op == PL330_OP_FLUSH)
> -                       del_from_queue(xfer);
> -
> -               ch->req[idx].x = NULL;
> -
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               _finish_off(xfer, S3C2410_RES_ABORT,
> -                               pl330op == PL330_OP_FLUSH ? 1 : 0);
> -               spin_lock_irqsave(&res_lock, flags);
> -       }
> -
> -       /* Flush the whole queue */
> -       if (pl330op == PL330_OP_FLUSH) {
> -
> -               if (ch->req[1 - idx].x) {
> -                       xfer = container_of(ch->req[1 - idx].x,
> -                                       struct s3c_pl330_xfer, px);
> -
> -                       del_from_queue(xfer);
> -
> -                       ch->req[1 - idx].x = NULL;
> -
> -                       spin_unlock_irqrestore(&res_lock, flags);
> -                       _finish_off(xfer, S3C2410_RES_ABORT, 1);
> -                       spin_lock_irqsave(&res_lock, flags);
> -               }
> -
> -               /* Finish off the remaining in the queue */
> -               xfer = ch->xfer_head;
> -               while (xfer) {
> -
> -                       del_from_queue(xfer);
> -
> -                       spin_unlock_irqrestore(&res_lock, flags);
> -                       _finish_off(xfer, S3C2410_RES_ABORT, 1);
> -                       spin_lock_irqsave(&res_lock, flags);
> -
> -                       xfer = ch->xfer_head;
> -               }
> -       }
> -
> -ctrl_exit:
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return ret;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_ctrl);
> -
> -int s3c2410_dma_enqueue(enum dma_ch id, void *token,
> -                       dma_addr_t addr, int size)
> -{
> -       struct s3c_pl330_chan *ch;
> -       struct s3c_pl330_xfer *xfer;
> -       unsigned long flags;
> -       int idx, ret = 0;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = id_to_chan(id);
> -
> -       /* Error if invalid or free channel */
> -       if (!ch || chan_free(ch)) {
> -               ret = -EINVAL;
> -               goto enq_exit;
> -       }
> -
> -       /* Error if size is unaligned */
> -       if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
> -               ret = -EINVAL;
> -               goto enq_exit;
> -       }
> -
> -       xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
> -       if (!xfer) {
> -               ret = -ENOMEM;
> -               goto enq_exit;
> -       }
> -
> -       xfer->token = token;
> -       xfer->chan = ch;
> -       xfer->px.bytes = size;
> -       xfer->px.next = NULL; /* Single request */
> -
> -       /* For S3C DMA API, direction is always fixed for all xfers */
> -       if (ch->req[0].rqtype == MEMTODEV) {
> -               xfer->px.src_addr = addr;
> -               xfer->px.dst_addr = ch->sdaddr;
> -       } else {
> -               xfer->px.src_addr = ch->sdaddr;
> -               xfer->px.dst_addr = addr;
> -       }
> -
> -       add_to_queue(ch, xfer, 0);
> -
> -       /* Try submitting on either request */
> -       idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> -
> -       if (!ch->req[idx].x)
> -               s3c_pl330_submit(ch, &ch->req[idx]);
> -       else
> -               s3c_pl330_submit(ch, &ch->req[1 - idx]);
> -
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       if (ch->options & S3C2410_DMAF_AUTOSTART)
> -               s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
> -
> -       return 0;
> -
> -enq_exit:
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return ret;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_enqueue);
> -
> -int s3c2410_dma_request(enum dma_ch id,
> -                       struct s3c2410_dma_client *client,
> -                       void *dev)
> -{
> -       struct s3c_pl330_dmac *dmac;
> -       struct s3c_pl330_chan *ch;
> -       unsigned long flags;
> -       int ret = 0;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = chan_acquire(id);
> -       if (!ch) {
> -               ret = -EBUSY;
> -               goto req_exit;
> -       }
> -
> -       dmac = ch->dmac;
> -
> -       ch->pl330_chan_id = pl330_request_channel(dmac->pi);
> -       if (!ch->pl330_chan_id) {
> -               chan_release(ch);
> -               ret = -EBUSY;
> -               goto req_exit;
> -       }
> -
> -       ch->client = client;
> -       ch->options = 0; /* Clear any option */
> -       ch->callback_fn = NULL; /* Clear any callback */
> -       ch->lrq = NULL;
> -
> -       ch->rqcfg.brst_size = 2; /* Default word size */
> -       ch->rqcfg.swap = SWAP_NO;
> -       ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
> -       ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
> -       ch->rqcfg.privileged = 0;
> -       ch->rqcfg.insnaccess = 0;
> -
> -       /* Set invalid direction */
> -       ch->req[0].rqtype = DEVTODEV;
> -       ch->req[1].rqtype = ch->req[0].rqtype;
> -
> -       ch->req[0].cfg = &ch->rqcfg;
> -       ch->req[1].cfg = ch->req[0].cfg;
> -
> -       ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
> -       ch->req[1].peri = ch->req[0].peri;
> -
> -       ch->req[0].token = &ch->req[0];
> -       ch->req[0].xfer_cb = s3c_pl330_rq0;
> -       ch->req[1].token = &ch->req[1];
> -       ch->req[1].xfer_cb = s3c_pl330_rq1;
> -
> -       ch->req[0].x = NULL;
> -       ch->req[1].x = NULL;
> -
> -       /* Reset xfer list */
> -       INIT_LIST_HEAD(&ch->xfer_list);
> -       ch->xfer_head = NULL;
> -
> -req_exit:
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return ret;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_request);
> -
> -int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
> -{
> -       struct s3c_pl330_chan *ch;
> -       struct s3c_pl330_xfer *xfer;
> -       unsigned long flags;
> -       int ret = 0;
> -       unsigned idx;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = id_to_chan(id);
> -
> -       if (!ch || chan_free(ch))
> -               goto free_exit;
> -
> -       /* Refuse if someone else wanted to free the channel */
> -       if (ch->client != client) {
> -               ret = -EBUSY;
> -               goto free_exit;
> -       }
> -
> -       /* Stop any active xfer, Flushe the queue and do callbacks */
> -       pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
> -
> -       /* Abort the submitted requests */
> -       idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> -
> -       if (ch->req[idx].x) {
> -               xfer = container_of(ch->req[idx].x,
> -                               struct s3c_pl330_xfer, px);
> -
> -               ch->req[idx].x = NULL;
> -               del_from_queue(xfer);
> -
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               _finish_off(xfer, S3C2410_RES_ABORT, 1);
> -               spin_lock_irqsave(&res_lock, flags);
> -       }
> -
> -       if (ch->req[1 - idx].x) {
> -               xfer = container_of(ch->req[1 - idx].x,
> -                               struct s3c_pl330_xfer, px);
> -
> -               ch->req[1 - idx].x = NULL;
> -               del_from_queue(xfer);
> -
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               _finish_off(xfer, S3C2410_RES_ABORT, 1);
> -               spin_lock_irqsave(&res_lock, flags);
> -       }
> -
> -       /* Pluck and Abort the queued requests in order */
> -       do {
> -               xfer = get_from_queue(ch, 1);
> -
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               _finish_off(xfer, S3C2410_RES_ABORT, 1);
> -               spin_lock_irqsave(&res_lock, flags);
> -       } while (xfer);
> -
> -       ch->client = NULL;
> -
> -       pl330_release_channel(ch->pl330_chan_id);
> -
> -       ch->pl330_chan_id = NULL;
> -
> -       chan_release(ch);
> -
> -free_exit:
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return ret;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_free);
> -
> -int s3c2410_dma_config(enum dma_ch id, int xferunit)
> -{
> -       struct s3c_pl330_chan *ch;
> -       struct pl330_info *pi;
> -       unsigned long flags;
> -       int i, dbwidth, ret = 0;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = id_to_chan(id);
> -
> -       if (!ch || chan_free(ch)) {
> -               ret = -EINVAL;
> -               goto cfg_exit;
> -       }
> -
> -       pi = ch->dmac->pi;
> -       dbwidth = pi->pcfg.data_bus_width / 8;
> -
> -       /* Max size of xfer can be pcfg.data_bus_width */
> -       if (xferunit > dbwidth) {
> -               ret = -EINVAL;
> -               goto cfg_exit;
> -       }
> -
> -       i = 0;
> -       while (xferunit != (1 << i))
> -               i++;
> -
> -       /* If valid value */
> -       if (xferunit == (1 << i))
> -               ch->rqcfg.brst_size = i;
> -       else
> -               ret = -EINVAL;
> -
> -cfg_exit:
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return ret;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_config);
> -
> -/* Options that are supported by this driver */
> -#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
> -
> -int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
> -{
> -       struct s3c_pl330_chan *ch;
> -       unsigned long flags;
> -       int ret = 0;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = id_to_chan(id);
> -
> -       if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
> -               ret = -EINVAL;
> -       else
> -               ch->options = options;
> -
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return 0;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_setflags);
> -
> -int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
> -{
> -       struct s3c_pl330_chan *ch;
> -       unsigned long flags;
> -       int ret = 0;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = id_to_chan(id);
> -
> -       if (!ch || chan_free(ch))
> -               ret = -EINVAL;
> -       else
> -               ch->callback_fn = rtn;
> -
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return ret;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
> -
> -int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
> -                         unsigned long address)
> -{
> -       struct s3c_pl330_chan *ch;
> -       unsigned long flags;
> -       int ret = 0;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       ch = id_to_chan(id);
> -
> -       if (!ch || chan_free(ch)) {
> -               ret = -EINVAL;
> -               goto devcfg_exit;
> -       }
> -
> -       switch (source) {
> -       case S3C2410_DMASRC_HW: /* P->M */
> -               ch->req[0].rqtype = DEVTOMEM;
> -               ch->req[1].rqtype = DEVTOMEM;
> -               ch->rqcfg.src_inc = 0;
> -               ch->rqcfg.dst_inc = 1;
> -               break;
> -       case S3C2410_DMASRC_MEM: /* M->P */
> -               ch->req[0].rqtype = MEMTODEV;
> -               ch->req[1].rqtype = MEMTODEV;
> -               ch->rqcfg.src_inc = 1;
> -               ch->rqcfg.dst_inc = 0;
> -               break;
> -       default:
> -               ret = -EINVAL;
> -               goto devcfg_exit;
> -       }
> -
> -       ch->sdaddr = address;
> -
> -devcfg_exit:
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return ret;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_devconfig);
> -
> -int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
> -{
> -       struct s3c_pl330_chan *ch = id_to_chan(id);
> -       struct pl330_chanstatus status;
> -       int ret;
> -
> -       if (!ch || chan_free(ch))
> -               return -EINVAL;
> -
> -       ret = pl330_chan_status(ch->pl330_chan_id, &status);
> -       if (ret < 0)
> -               return ret;
> -
> -       *src = status.src_addr;
> -       *dst = status.dst_addr;
> -
> -       return 0;
> -}
> -EXPORT_SYMBOL(s3c2410_dma_getposition);
> -
> -static irqreturn_t pl330_irq_handler(int irq, void *data)
> -{
> -       if (pl330_update(data))
> -               return IRQ_HANDLED;
> -       else
> -               return IRQ_NONE;
> -}
> -
> -static int pl330_probe(struct platform_device *pdev)
> -{
> -       struct s3c_pl330_dmac *s3c_pl330_dmac;
> -       struct s3c_pl330_platdata *pl330pd;
> -       struct pl330_info *pl330_info;
> -       struct resource *res;
> -       int i, ret, irq;
> -
> -       pl330pd = pdev->dev.platform_data;
> -
> -       /* Can't do without the list of _32_ peripherals */
> -       if (!pl330pd || !pl330pd->peri) {
> -               dev_err(&pdev->dev, "platform data missing!\n");
> -               return -ENODEV;
> -       }
> -
> -       pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
> -       if (!pl330_info)
> -               return -ENOMEM;
> -
> -       pl330_info->pl330_data = NULL;
> -       pl330_info->dev = &pdev->dev;
> -
> -       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> -       if (!res) {
> -               ret = -ENODEV;
> -               goto probe_err1;
> -       }
> -
> -       request_mem_region(res->start, resource_size(res), pdev->name);
> -
> -       pl330_info->base = ioremap(res->start, resource_size(res));
> -       if (!pl330_info->base) {
> -               ret = -ENXIO;
> -               goto probe_err2;
> -       }
> -
> -       irq = platform_get_irq(pdev, 0);
> -       if (irq < 0) {
> -               ret = irq;
> -               goto probe_err3;
> -       }
> -
> -       ret = request_irq(irq, pl330_irq_handler, 0,
> -                       dev_name(&pdev->dev), pl330_info);
> -       if (ret)
> -               goto probe_err4;
> -
> -       /* Allocate a new DMAC */
> -       s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
> -       if (!s3c_pl330_dmac) {
> -               ret = -ENOMEM;
> -               goto probe_err5;
> -       }
> -
> -       /* Get operation clock and enable it */
> -       s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
> -       if (IS_ERR(s3c_pl330_dmac->clk)) {
> -               dev_err(&pdev->dev, "Cannot get operation clock.\n");
> -               ret = -EINVAL;
> -               goto probe_err6;
> -       }
> -       clk_enable(s3c_pl330_dmac->clk);
> -
> -       ret = pl330_add(pl330_info);
> -       if (ret)
> -               goto probe_err7;
> -
> -       /* Hook the info */
> -       s3c_pl330_dmac->pi = pl330_info;
> -
> -       /* No busy channels */
> -       s3c_pl330_dmac->busy_chan = 0;
> -
> -       s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
> -                               sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
> -
> -       if (!s3c_pl330_dmac->kmcache) {
> -               ret = -ENOMEM;
> -               goto probe_err8;
> -       }
> -
> -       /* Get the list of peripherals */
> -       s3c_pl330_dmac->peri = pl330pd->peri;
> -
> -       /* Attach to the list of DMACs */
> -       list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
> -
> -       /* Create a channel for each peripheral in the DMAC
> -        * that is, if it doesn't already exist
> -        */
> -       for (i = 0; i < PL330_MAX_PERI; i++)
> -               if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
> -                       chan_add(s3c_pl330_dmac->peri[i]);
> -
> -       printk(KERN_INFO
> -               "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
> -       printk(KERN_INFO
> -               "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
> -               pl330_info->pcfg.data_buf_dep,
> -               pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
> -               pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
> -
> -       return 0;
> -
> -probe_err8:
> -       pl330_del(pl330_info);
> -probe_err7:
> -       clk_disable(s3c_pl330_dmac->clk);
> -       clk_put(s3c_pl330_dmac->clk);
> -probe_err6:
> -       kfree(s3c_pl330_dmac);
> -probe_err5:
> -       free_irq(irq, pl330_info);
> -probe_err4:
> -probe_err3:
> -       iounmap(pl330_info->base);
> -probe_err2:
> -       release_mem_region(res->start, resource_size(res));
> -probe_err1:
> -       kfree(pl330_info);
> -
> -       return ret;
> -}
> -
> -static int pl330_remove(struct platform_device *pdev)
> -{
> -       struct s3c_pl330_dmac *dmac, *d;
> -       struct s3c_pl330_chan *ch;
> -       unsigned long flags;
> -       int del, found;
> -
> -       if (!pdev->dev.platform_data)
> -               return -EINVAL;
> -
> -       spin_lock_irqsave(&res_lock, flags);
> -
> -       found = 0;
> -       list_for_each_entry(d, &dmac_list, node)
> -               if (d->pi->dev == &pdev->dev) {
> -                       found = 1;
> -                       break;
> -               }
> -
> -       if (!found) {
> -               spin_unlock_irqrestore(&res_lock, flags);
> -               return 0;
> -       }
> -
> -       dmac = d;
> -
> -       /* Remove all Channels that are managed only by this DMAC */
> -       list_for_each_entry(ch, &chan_list, node) {
> -
> -               /* Only channels that are handled by this DMAC */
> -               if (iface_of_dmac(dmac, ch->id))
> -                       del = 1;
> -               else
> -                       continue;
> -
> -               /* Don't remove if some other DMAC has it too */
> -               list_for_each_entry(d, &dmac_list, node)
> -                       if (d != dmac && iface_of_dmac(d, ch->id)) {
> -                               del = 0;
> -                               break;
> -                       }
> -
> -               if (del) {
> -                       spin_unlock_irqrestore(&res_lock, flags);
> -                       s3c2410_dma_free(ch->id, ch->client);
> -                       spin_lock_irqsave(&res_lock, flags);
> -                       list_del(&ch->node);
> -                       kfree(ch);
> -               }
> -       }
> -
> -       /* Disable operation clock */
> -       clk_disable(dmac->clk);
> -       clk_put(dmac->clk);
> -
> -       /* Remove the DMAC */
> -       list_del(&dmac->node);
> -       kfree(dmac);
> -
> -       spin_unlock_irqrestore(&res_lock, flags);
> -
> -       return 0;
> -}
> -
> -static struct platform_driver pl330_driver = {
> -       .driver         = {
> -               .owner  = THIS_MODULE,
> -               .name   = "s3c-pl330",
> -       },
> -       .probe          = pl330_probe,
> -       .remove         = pl330_remove,
> -};
> -
> -static int __init pl330_init(void)
> -{
> -       return platform_driver_register(&pl330_driver);
> -}
> -module_init(pl330_init);
> -
> -static void __exit pl330_exit(void)
> -{
> -       platform_driver_unregister(&pl330_driver);
> -       return;
> -}
> -module_exit(pl330_exit);
> -
> -MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
> -MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
> -MODULE_LICENSE("GPL");
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 25cf327..9a023e6 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -199,6 +199,14 @@ config PL330_DMA
>          You need to provide platform specific settings via
>          platform_data for a dma-pl330 device.
>
> +config S3C_PL330_DMA
> +       bool "S3C DMA API Driver for PL330 DMAC"
> +       select DMA_ENGINE
> +       select PL330
> +       depends on PLAT_SAMSUNG
> +       help
> +         S3C DMA API Driver for PL330 DMAC.
> +
>  config PCH_DMA
>        tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
>        depends on PCI && X86
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index 836095a..6e81b5d 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
>  obj-$(CONFIG_PL330_DMA) += pl330.o
>  obj-$(CONFIG_PCH_DMA) += pch_dma.o
>  obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
> +obj-$(CONFIG_S3C_PL330_DMA)    += s3c-pl330.o
> diff --git a/drivers/dma/s3c-pl330.c b/drivers/dma/s3c-pl330.c
> new file mode 100644
> index 0000000..f85638c
> --- /dev/null
> +++ b/drivers/dma/s3c-pl330.c
> @@ -0,0 +1,1244 @@
> +/* linux/drivers/dma/s3c-pl330.c
> + *
> + * Copyright (C) 2010 Samsung Electronics Co. Ltd.
> + *     Jaswinder Singh <jassi.brar@samsung.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + */
> +
> +#include <linux/init.h>
> +#include <linux/module.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/slab.h>
> +#include <linux/platform_device.h>
> +#include <linux/clk.h>
> +#include <linux/err.h>
> +
> +#include <asm/hardware/pl330.h>
> +
> +#include <plat/s3c-pl330-pdata.h>
> +
> +/**
> + * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
> + * @busy_chan: Number of channels currently busy.
> + * @peri: List of IDs of peripherals this DMAC can work with.
> + * @node: To attach to the global list of DMACs.
> + * @pi: PL330 configuration info for the DMAC.
> + * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
> + * @clk: Pointer of DMAC operation clock.
> + */
> +struct s3c_pl330_dmac {
> +       unsigned                busy_chan;
> +       enum dma_ch             *peri;
> +       struct list_head        node;
> +       struct pl330_info       *pi;
> +       struct kmem_cache       *kmcache;
> +       struct clk              *clk;
> +};
> +
> +/**
> + * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
> + * @token: Xfer ID provided by the client.
> + * @node: To attach to the list of xfers on a channel.
> + * @px: Xfer for PL330 core.
> + * @chan: Owner channel of this xfer.
> + */
> +struct s3c_pl330_xfer {
> +       void                    *token;
> +       struct list_head        node;
> +       struct pl330_xfer       px;
> +       struct s3c_pl330_chan   *chan;
> +};
> +
> +/**
> + * struct s3c_pl330_chan - Logical channel to communicate with
> + *     a Physical peripheral.
> + * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
> + *     NULL if the channel is available to be acquired.
> + * @id: ID of the peripheral that this channel can communicate with.
> + * @options: Options specified by the client.
> + * @sdaddr: Address provided via s3c2410_dma_devconfig.
> + * @node: To attach to the global list of channels.
> + * @lrq: Pointer to the last submitted pl330_req to PL330 core.
> + * @xfer_list: To manage list of xfers enqueued.
> + * @req: Two requests to communicate with the PL330 engine.
> + * @callback_fn: Callback function to the client.
> + * @rqcfg: Channel configuration for the xfers.
> + * @xfer_head: Pointer to the xfer to be next executed.
> + * @dmac: Pointer to the DMAC that manages this channel, NULL if the
> + *     channel is available to be acquired.
> + * @client: Client of this channel. NULL if the
> + *     channel is available to be acquired.
> + */
> +struct s3c_pl330_chan {
> +       void                            *pl330_chan_id;
> +       enum dma_ch                     id;
> +       unsigned int                    options;
> +       unsigned long                   sdaddr;
> +       struct list_head                node;
> +       struct pl330_req                *lrq;
> +       struct list_head                xfer_list;
> +       struct pl330_req                req[2];
> +       s3c2410_dma_cbfn_t              callback_fn;
> +       struct pl330_reqcfg             rqcfg;
> +       struct s3c_pl330_xfer           *xfer_head;
> +       struct s3c_pl330_dmac           *dmac;
> +       struct s3c2410_dma_client       *client;
> +};
> +
> +/* All DMACs in the platform */
> +static LIST_HEAD(dmac_list);
> +
> +/* All channels to peripherals in the platform */
> +static LIST_HEAD(chan_list);
> +
> +/*
> + * Since we add resources(DMACs and Channels) to the global pool,
> + * we need to guard access to the resources using a global lock
> + */
> +static DEFINE_SPINLOCK(res_lock);
> +
> +/* Returns the channel with ID 'id' in the chan_list */
> +static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
> +{
> +       struct s3c_pl330_chan *ch;
> +
> +       list_for_each_entry(ch, &chan_list, node)
> +               if (ch->id == id)
> +                       return ch;
> +
> +       return NULL;
> +}
> +
> +/* Allocate a new channel with ID 'id' and add to chan_list */
> +static void chan_add(const enum dma_ch id)
> +{
> +       struct s3c_pl330_chan *ch = id_to_chan(id);
> +
> +       /* Return if the channel already exists */
> +       if (ch)
> +               return;
> +
> +       ch = kmalloc(sizeof(*ch), GFP_KERNEL);
> +       /* Return silently to work with other channels */
> +       if (!ch)
> +               return;
> +
> +       ch->id = id;
> +       ch->dmac = NULL;
> +
> +       list_add_tail(&ch->node, &chan_list);
> +}
> +
> +/* If the channel is not yet acquired by any client */
> +static bool chan_free(struct s3c_pl330_chan *ch)
> +{
> +       if (!ch)
> +               return false;
> +
> +       /* Channel points to some DMAC only when it's acquired */
> +       return ch->dmac ? false : true;
> +}
> +
> +/*
> + * Returns 0 if peripheral i/f is invalid or not present on the dmac.
> + * Index + 1, otherwise.
> + */
> +static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
> +{
> +       enum dma_ch *id = dmac->peri;
> +       int i;
> +
> +       /* Discount invalid markers */
> +       if (ch_id == DMACH_MAX)
> +               return 0;
> +
> +       for (i = 0; i < PL330_MAX_PERI; i++)
> +               if (id[i] == ch_id)
> +                       return i + 1;
> +
> +       return 0;
> +}
> +
> +/* If all channel threads of the DMAC are busy */
> +static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
> +{
> +       struct pl330_info *pi = dmac->pi;
> +
> +       return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
> +}
> +
> +/*
> + * Returns the number of free channels that
> + * can be handled by this dmac only.
> + */
> +static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
> +{
> +       enum dma_ch *id = dmac->peri;
> +       struct s3c_pl330_dmac *d;
> +       struct s3c_pl330_chan *ch;
> +       unsigned found, count = 0;
> +       enum dma_ch p;
> +       int i;
> +
> +       for (i = 0; i < PL330_MAX_PERI; i++) {
> +               p = id[i];
> +               ch = id_to_chan(p);
> +
> +               if (p == DMACH_MAX || !chan_free(ch))
> +                       continue;
> +
> +               found = 0;
> +               list_for_each_entry(d, &dmac_list, node) {
> +                       if (d != dmac && iface_of_dmac(d, ch->id)) {
> +                               found = 1;
> +                               break;
> +                       }
> +               }
> +               if (!found)
> +                       count++;
> +       }
> +
> +       return count;
> +}
> +
> +/*
> + * Measure of suitability of 'dmac' handling 'ch'
> + *
> + * 0 indicates 'dmac' can not handle 'ch' either
> + * because it is not supported by the hardware or
> + * because all dmac channels are currently busy.
> + *
> + * >0 value indicates 'dmac' has the capability.
> + * The bigger the value the more suitable the dmac.
> + */
> +#define MAX_SUIT       UINT_MAX
> +#define MIN_SUIT       0
> +
> +static unsigned suitablility(struct s3c_pl330_dmac *dmac,
> +               struct s3c_pl330_chan *ch)
> +{
> +       struct pl330_info *pi = dmac->pi;
> +       enum dma_ch *id = dmac->peri;
> +       struct s3c_pl330_dmac *d;
> +       unsigned s;
> +       int i;
> +
> +       s = MIN_SUIT;
> +       /* If all the DMAC channel threads are busy */
> +       if (dmac_busy(dmac))
> +               return s;
> +
> +       for (i = 0; i < PL330_MAX_PERI; i++)
> +               if (id[i] == ch->id)
> +                       break;
> +
> +       /* If the 'dmac' can't talk to 'ch' */
> +       if (i == PL330_MAX_PERI)
> +               return s;
> +
> +       s = MAX_SUIT;
> +       list_for_each_entry(d, &dmac_list, node) {
> +               /*
> +                * If some other dmac can talk to this
> +                * peri and has some channel free.
> +                */
> +               if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
> +                       s = 0;
> +                       break;
> +               }
> +       }
> +       if (s)
> +               return s;
> +
> +       s = 100;
> +
> +       /* Good if free chans are more, bad otherwise */
> +       s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
> +
> +       return s;
> +}
> +
> +/* More than one DMAC may have capability to transfer data with the
> + * peripheral. This function assigns most suitable DMAC to manage the
> + * channel and hence communicate with the peripheral.
> + */
> +static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
> +{
> +       struct s3c_pl330_dmac *d, *dmac = NULL;
> +       unsigned sn, sl = MIN_SUIT;
> +
> +       list_for_each_entry(d, &dmac_list, node) {
> +               sn = suitablility(d, ch);
> +
> +               if (sn == MAX_SUIT)
> +                       return d;
> +
> +               if (sn > sl)
> +                       dmac = d;
> +       }
> +
> +       return dmac;
> +}
> +
> +/* Acquire the channel for peripheral 'id' */
> +static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
> +{
> +       struct s3c_pl330_chan *ch = id_to_chan(id);
> +       struct s3c_pl330_dmac *dmac;
> +
> +       /* If the channel doesn't exist or is already acquired */
> +       if (!ch || !chan_free(ch)) {
> +               ch = NULL;
> +               goto acq_exit;
> +       }
> +
> +       dmac = map_chan_to_dmac(ch);
> +       /* If couldn't map */
> +       if (!dmac) {
> +               ch = NULL;
> +               goto acq_exit;
> +       }
> +
> +       dmac->busy_chan++;
> +       ch->dmac = dmac;
> +
> +acq_exit:
> +       return ch;
> +}
> +
> +/* Delete xfer from the queue */
> +static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
> +{
> +       struct s3c_pl330_xfer *t;
> +       struct s3c_pl330_chan *ch;
> +       int found;
> +
> +       if (!xfer)
> +               return;
> +
> +       ch = xfer->chan;
> +
> +       /* Make sure xfer is in the queue */
> +       found = 0;
> +       list_for_each_entry(t, &ch->xfer_list, node)
> +               if (t == xfer) {
> +                       found = 1;
> +                       break;
> +               }
> +
> +       if (!found)
> +               return;
> +
> +       /* If xfer is last entry in the queue */
> +       if (xfer->node.next == &ch->xfer_list)
> +               t = list_entry(ch->xfer_list.next,
> +                               struct s3c_pl330_xfer, node);
> +       else
> +               t = list_entry(xfer->node.next,
> +                               struct s3c_pl330_xfer, node);
> +
> +       /* If there was only one node left */
> +       if (t == xfer)
> +               ch->xfer_head = NULL;
> +       else if (ch->xfer_head == xfer)
> +               ch->xfer_head = t;
> +
> +       list_del(&xfer->node);
> +}
> +
> +/* Provides pointer to the next xfer in the queue.
> + * If CIRCULAR option is set, the list is left intact,
> + * otherwise the xfer is removed from the list.
> + * Forced delete 'pluck' can be set to override the CIRCULAR option.
> + */
> +static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
> +               int pluck)
> +{
> +       struct s3c_pl330_xfer *xfer = ch->xfer_head;
> +
> +       if (!xfer)
> +               return NULL;
> +
> +       /* If xfer is last entry in the queue */
> +       if (xfer->node.next == &ch->xfer_list)
> +               ch->xfer_head = list_entry(ch->xfer_list.next,
> +                                       struct s3c_pl330_xfer, node);
> +       else
> +               ch->xfer_head = list_entry(xfer->node.next,
> +                                       struct s3c_pl330_xfer, node);
> +
> +       if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
> +               del_from_queue(xfer);
> +
> +       return xfer;
> +}
> +
> +static inline void add_to_queue(struct s3c_pl330_chan *ch,
> +               struct s3c_pl330_xfer *xfer, int front)
> +{
> +       struct pl330_xfer *xt;
> +
> +       /* If queue empty */
> +       if (ch->xfer_head == NULL)
> +               ch->xfer_head = xfer;
> +
> +       xt = &ch->xfer_head->px;
> +       /* If the head already submitted (CIRCULAR head) */
> +       if (ch->options & S3C2410_DMAF_CIRCULAR &&
> +               (xt == ch->req[0].x || xt == ch->req[1].x))
> +               ch->xfer_head = xfer;
> +
> +       /* If this is a resubmission, it should go at the head */
> +       if (front) {
> +               ch->xfer_head = xfer;
> +               list_add(&xfer->node, &ch->xfer_list);
> +       } else {
> +               list_add_tail(&xfer->node, &ch->xfer_list);
> +       }
> +}
> +
> +static inline void _finish_off(struct s3c_pl330_xfer *xfer,
> +               enum s3c2410_dma_buffresult res, int ffree)
> +{
> +       struct s3c_pl330_chan *ch;
> +
> +       if (!xfer)
> +               return;
> +
> +       ch = xfer->chan;
> +
> +       /* Do callback */
> +       if (ch->callback_fn)
> +               ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
> +
> +       /* Force Free or if buffer is not needed anymore */
> +       if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
> +               kmem_cache_free(ch->dmac->kmcache, xfer);
> +}
> +
> +static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
> +               struct pl330_req *r)
> +{
> +       struct s3c_pl330_xfer *xfer;
> +       int ret = 0;
> +
> +       /* If already submitted */
> +       if (r->x)
> +               return 0;
> +
> +       xfer = get_from_queue(ch, 0);
> +       if (xfer) {
> +               r->x = &xfer->px;
> +
> +               /* Use max bandwidth for M<->M xfers */
> +               if (r->rqtype == MEMTOMEM) {
> +                       struct pl330_info *pi = xfer->chan->dmac->pi;
> +                       int burst = 1 << ch->rqcfg.brst_size;
> +                       u32 bytes = r->x->bytes;
> +                       int bl;
> +
> +                       bl = pi->pcfg.data_bus_width / 8;
> +                       bl *= pi->pcfg.data_buf_dep;
> +                       bl /= burst;
> +
> +                       /* src/dst_burst_len can't be more than 16 */
> +                       if (bl > 16)
> +                               bl = 16;
> +
> +                       while (bl > 1) {
> +                               if (!(bytes % (bl * burst)))
> +                                       break;
> +                               bl--;
> +                       }
> +
> +                       ch->rqcfg.brst_len = bl;
> +               } else {
> +                       ch->rqcfg.brst_len = 1;
> +               }
> +
> +               ret = pl330_submit_req(ch->pl330_chan_id, r);
> +
> +               /* If submission was successful */
> +               if (!ret) {
> +                       ch->lrq = r; /* latest submitted req */
> +                       return 0;
> +               }
> +
> +               r->x = NULL;
> +
> +               /* If both of the PL330 ping-pong buffers filled */
> +               if (ret == -EAGAIN) {
> +                       dev_err(ch->dmac->pi->dev, "%s:%d!\n",
> +                               __func__, __LINE__);
> +                       /* Queue back again */
> +                       add_to_queue(ch, xfer, 1);
> +                       ret = 0;
> +               } else {
> +                       dev_err(ch->dmac->pi->dev, "%s:%d!\n",
> +                               __func__, __LINE__);
> +                       _finish_off(xfer, S3C2410_RES_ERR, 0);
> +               }
> +       }
> +
> +       return ret;
> +}
> +
> +static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
> +       struct pl330_req *r, enum pl330_op_err err)
> +{
> +       unsigned long flags;
> +       struct s3c_pl330_xfer *xfer;
> +       struct pl330_xfer *xl = r->x;
> +       enum s3c2410_dma_buffresult res;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       r->x = NULL;
> +
> +       s3c_pl330_submit(ch, r);
> +
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       /* Map result to S3C DMA API */
> +       if (err == PL330_ERR_NONE)
> +               res = S3C2410_RES_OK;
> +       else if (err == PL330_ERR_ABORT)
> +               res = S3C2410_RES_ABORT;
> +       else
> +               res = S3C2410_RES_ERR;
> +
> +       /* If last request had some xfer */
> +       if (xl) {
> +               xfer = container_of(xl, struct s3c_pl330_xfer, px);
> +               _finish_off(xfer, res, 0);
> +       } else {
> +               dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
> +                       __func__, __LINE__);
> +       }
> +}
> +
> +static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
> +{
> +       struct pl330_req *r = token;
> +       struct s3c_pl330_chan *ch = container_of(r,
> +                                       struct s3c_pl330_chan, req[0]);
> +       s3c_pl330_rq(ch, r, err);
> +}
> +
> +static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
> +{
> +       struct pl330_req *r = token;
> +       struct s3c_pl330_chan *ch = container_of(r,
> +                                       struct s3c_pl330_chan, req[1]);
> +       s3c_pl330_rq(ch, r, err);
> +}
> +
> +/* Release an acquired channel */
> +static void chan_release(struct s3c_pl330_chan *ch)
> +{
> +       struct s3c_pl330_dmac *dmac;
> +
> +       if (chan_free(ch))
> +               return;
> +
> +       dmac = ch->dmac;
> +       ch->dmac = NULL;
> +       dmac->busy_chan--;
> +}
> +
> +int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
> +{
> +       struct s3c_pl330_xfer *xfer;
> +       enum pl330_chan_op pl330op;
> +       struct s3c_pl330_chan *ch;
> +       unsigned long flags;
> +       int idx, ret;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = id_to_chan(id);
> +
> +       if (!ch || chan_free(ch)) {
> +               ret = -EINVAL;
> +               goto ctrl_exit;
> +       }
> +
> +       switch (op) {
> +       case S3C2410_DMAOP_START:
> +               /* Make sure both reqs are enqueued */
> +               idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> +               s3c_pl330_submit(ch, &ch->req[idx]);
> +               s3c_pl330_submit(ch, &ch->req[1 - idx]);
> +               pl330op = PL330_OP_START;
> +               break;
> +
> +       case S3C2410_DMAOP_STOP:
> +               pl330op = PL330_OP_ABORT;
> +               break;
> +
> +       case S3C2410_DMAOP_FLUSH:
> +               pl330op = PL330_OP_FLUSH;
> +               break;
> +
> +       case S3C2410_DMAOP_PAUSE:
> +       case S3C2410_DMAOP_RESUME:
> +       case S3C2410_DMAOP_TIMEOUT:
> +       case S3C2410_DMAOP_STARTED:
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               return 0;
> +
> +       default:
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               return -EINVAL;
> +       }
> +
> +       ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
> +
> +       if (pl330op == PL330_OP_START) {
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               return ret;
> +       }
> +
> +       idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> +
> +       /* Abort the current xfer */
> +       if (ch->req[idx].x) {
> +               xfer = container_of(ch->req[idx].x,
> +                               struct s3c_pl330_xfer, px);
> +
> +               /* Drop xfer during FLUSH */
> +               if (pl330op == PL330_OP_FLUSH)
> +                       del_from_queue(xfer);
> +
> +               ch->req[idx].x = NULL;
> +
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               _finish_off(xfer, S3C2410_RES_ABORT,
> +                               pl330op == PL330_OP_FLUSH ? 1 : 0);
> +               spin_lock_irqsave(&res_lock, flags);
> +       }
> +
> +       /* Flush the whole queue */
> +       if (pl330op == PL330_OP_FLUSH) {
> +
> +               if (ch->req[1 - idx].x) {
> +                       xfer = container_of(ch->req[1 - idx].x,
> +                                       struct s3c_pl330_xfer, px);
> +
> +                       del_from_queue(xfer);
> +
> +                       ch->req[1 - idx].x = NULL;
> +
> +                       spin_unlock_irqrestore(&res_lock, flags);
> +                       _finish_off(xfer, S3C2410_RES_ABORT, 1);
> +                       spin_lock_irqsave(&res_lock, flags);
> +               }
> +
> +               /* Finish off the remaining in the queue */
> +               xfer = ch->xfer_head;
> +               while (xfer) {
> +
> +                       del_from_queue(xfer);
> +
> +                       spin_unlock_irqrestore(&res_lock, flags);
> +                       _finish_off(xfer, S3C2410_RES_ABORT, 1);
> +                       spin_lock_irqsave(&res_lock, flags);
> +
> +                       xfer = ch->xfer_head;
> +               }
> +       }
> +
> +ctrl_exit:
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return ret;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_ctrl);
> +
> +int s3c2410_dma_enqueue(enum dma_ch id, void *token,
> +                       dma_addr_t addr, int size)
> +{
> +       struct s3c_pl330_chan *ch;
> +       struct s3c_pl330_xfer *xfer;
> +       unsigned long flags;
> +       int idx, ret = 0;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = id_to_chan(id);
> +
> +       /* Error if invalid or free channel */
> +       if (!ch || chan_free(ch)) {
> +               ret = -EINVAL;
> +               goto enq_exit;
> +       }
> +
> +       /* Error if size is unaligned */
> +       if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
> +               ret = -EINVAL;
> +               goto enq_exit;
> +       }
> +
> +       xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
> +       if (!xfer) {
> +               ret = -ENOMEM;
> +               goto enq_exit;
> +       }
> +
> +       xfer->token = token;
> +       xfer->chan = ch;
> +       xfer->px.bytes = size;
> +       xfer->px.next = NULL; /* Single request */
> +
> +       /* For S3C DMA API, direction is always fixed for all xfers */
> +       if (ch->req[0].rqtype == MEMTODEV) {
> +               xfer->px.src_addr = addr;
> +               xfer->px.dst_addr = ch->sdaddr;
> +       } else {
> +               xfer->px.src_addr = ch->sdaddr;
> +               xfer->px.dst_addr = addr;
> +       }
> +
> +       add_to_queue(ch, xfer, 0);
> +
> +       /* Try submitting on either request */
> +       idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> +
> +       if (!ch->req[idx].x)
> +               s3c_pl330_submit(ch, &ch->req[idx]);
> +       else
> +               s3c_pl330_submit(ch, &ch->req[1 - idx]);
> +
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       if (ch->options & S3C2410_DMAF_AUTOSTART)
> +               s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
> +
> +       return 0;
> +
> +enq_exit:
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return ret;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_enqueue);
> +
> +int s3c2410_dma_request(enum dma_ch id,
> +                       struct s3c2410_dma_client *client,
> +                       void *dev)
> +{
> +       struct s3c_pl330_dmac *dmac;
> +       struct s3c_pl330_chan *ch;
> +       unsigned long flags;
> +       int ret = 0;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = chan_acquire(id);
> +       if (!ch) {
> +               ret = -EBUSY;
> +               goto req_exit;
> +       }
> +
> +       dmac = ch->dmac;
> +
> +       ch->pl330_chan_id = pl330_request_channel(dmac->pi);
> +       if (!ch->pl330_chan_id) {
> +               chan_release(ch);
> +               ret = -EBUSY;
> +               goto req_exit;
> +       }
> +
> +       ch->client = client;
> +       ch->options = 0; /* Clear any option */
> +       ch->callback_fn = NULL; /* Clear any callback */
> +       ch->lrq = NULL;
> +
> +       ch->rqcfg.brst_size = 2; /* Default word size */
> +       ch->rqcfg.swap = SWAP_NO;
> +       ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
> +       ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
> +       ch->rqcfg.privileged = 0;
> +       ch->rqcfg.insnaccess = 0;
> +
> +       /* Set invalid direction */
> +       ch->req[0].rqtype = DEVTODEV;
> +       ch->req[1].rqtype = ch->req[0].rqtype;
> +
> +       ch->req[0].cfg = &ch->rqcfg;
> +       ch->req[1].cfg = ch->req[0].cfg;
> +
> +       ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
> +       ch->req[1].peri = ch->req[0].peri;
> +
> +       ch->req[0].token = &ch->req[0];
> +       ch->req[0].xfer_cb = s3c_pl330_rq0;
> +       ch->req[1].token = &ch->req[1];
> +       ch->req[1].xfer_cb = s3c_pl330_rq1;
> +
> +       ch->req[0].x = NULL;
> +       ch->req[1].x = NULL;
> +
> +       /* Reset xfer list */
> +       INIT_LIST_HEAD(&ch->xfer_list);
> +       ch->xfer_head = NULL;
> +
> +req_exit:
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return ret;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_request);
> +
> +int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
> +{
> +       struct s3c_pl330_chan *ch;
> +       struct s3c_pl330_xfer *xfer;
> +       unsigned long flags;
> +       int ret = 0;
> +       unsigned idx;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = id_to_chan(id);
> +
> +       if (!ch || chan_free(ch))
> +               goto free_exit;
> +
> +       /* Refuse if someone else wanted to free the channel */
> +       if (ch->client != client) {
> +               ret = -EBUSY;
> +               goto free_exit;
> +       }
> +
> +       /* Stop any active xfer, Flush the queue and do callbacks */
> +       pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
> +
> +       /* Abort the submitted requests */
> +       idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
> +
> +       if (ch->req[idx].x) {
> +               xfer = container_of(ch->req[idx].x,
> +                               struct s3c_pl330_xfer, px);
> +
> +               ch->req[idx].x = NULL;
> +               del_from_queue(xfer);
> +
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               _finish_off(xfer, S3C2410_RES_ABORT, 1);
> +               spin_lock_irqsave(&res_lock, flags);
> +       }
> +
> +       if (ch->req[1 - idx].x) {
> +               xfer = container_of(ch->req[1 - idx].x,
> +                               struct s3c_pl330_xfer, px);
> +
> +               ch->req[1 - idx].x = NULL;
> +               del_from_queue(xfer);
> +
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               _finish_off(xfer, S3C2410_RES_ABORT, 1);
> +               spin_lock_irqsave(&res_lock, flags);
> +       }
> +
> +       /* Pluck and Abort the queued requests in order */
> +       do {
> +               xfer = get_from_queue(ch, 1);
> +
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               _finish_off(xfer, S3C2410_RES_ABORT, 1);
> +               spin_lock_irqsave(&res_lock, flags);
> +       } while (xfer);
> +
> +       ch->client = NULL;
> +
> +       pl330_release_channel(ch->pl330_chan_id);
> +
> +       ch->pl330_chan_id = NULL;
> +
> +       chan_release(ch);
> +
> +free_exit:
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return ret;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_free);
> +
> +int s3c2410_dma_config(enum dma_ch id, int xferunit)
> +{
> +       struct s3c_pl330_chan *ch;
> +       struct pl330_info *pi;
> +       unsigned long flags;
> +       int i, dbwidth, ret = 0;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = id_to_chan(id);
> +
> +       if (!ch || chan_free(ch)) {
> +               ret = -EINVAL;
> +               goto cfg_exit;
> +       }
> +
> +       pi = ch->dmac->pi;
> +       dbwidth = pi->pcfg.data_bus_width / 8;
> +
> +       /* Max size of xfer can be pcfg.data_bus_width */
> +       if (xferunit > dbwidth) {
> +               ret = -EINVAL;
> +               goto cfg_exit;
> +       }
> +
> +       i = 0;
> +       while (xferunit != (1 << i))
> +               i++;
> +
> +       /* If valid value */
> +       if (xferunit == (1 << i))
> +               ch->rqcfg.brst_size = i;
> +       else
> +               ret = -EINVAL;
> +
> +cfg_exit:
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return ret;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_config);
> +
> +/* Options that are supported by this driver */
> +#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
> +
> +int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
> +{
> +       struct s3c_pl330_chan *ch;
> +       unsigned long flags;
> +       int ret = 0;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = id_to_chan(id);
> +
> +       if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
> +               ret = -EINVAL;
> +       else
> +               ch->options = options;
> +
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return 0;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_setflags);
> +
> +int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
> +{
> +       struct s3c_pl330_chan *ch;
> +       unsigned long flags;
> +       int ret = 0;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = id_to_chan(id);
> +
> +       if (!ch || chan_free(ch))
> +               ret = -EINVAL;
> +       else
> +               ch->callback_fn = rtn;
> +
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return ret;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
> +
> +int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
> +                         unsigned long address)
> +{
> +       struct s3c_pl330_chan *ch;
> +       unsigned long flags;
> +       int ret = 0;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       ch = id_to_chan(id);
> +
> +       if (!ch || chan_free(ch)) {
> +               ret = -EINVAL;
> +               goto devcfg_exit;
> +       }
> +
> +       switch (source) {
> +       case S3C2410_DMASRC_HW: /* P->M */
> +               ch->req[0].rqtype = DEVTOMEM;
> +               ch->req[1].rqtype = DEVTOMEM;
> +               ch->rqcfg.src_inc = 0;
> +               ch->rqcfg.dst_inc = 1;
> +               break;
> +       case S3C2410_DMASRC_MEM: /* M->P */
> +               ch->req[0].rqtype = MEMTODEV;
> +               ch->req[1].rqtype = MEMTODEV;
> +               ch->rqcfg.src_inc = 1;
> +               ch->rqcfg.dst_inc = 0;
> +               break;
> +       default:
> +               ret = -EINVAL;
> +               goto devcfg_exit;
> +       }
> +
> +       ch->sdaddr = address;
> +
> +devcfg_exit:
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return ret;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_devconfig);
> +
> +int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
> +{
> +       struct s3c_pl330_chan *ch = id_to_chan(id);
> +       struct pl330_chanstatus status;
> +       int ret;
> +
> +       if (!ch || chan_free(ch))
> +               return -EINVAL;
> +
> +       ret = pl330_chan_status(ch->pl330_chan_id, &status);
> +       if (ret < 0)
> +               return ret;
> +
> +       *src = status.src_addr;
> +       *dst = status.dst_addr;
> +
> +       return 0;
> +}
> +EXPORT_SYMBOL(s3c2410_dma_getposition);
> +
> +static irqreturn_t pl330_irq_handler(int irq, void *data)
> +{
> +       if (pl330_update(data))
> +               return IRQ_HANDLED;
> +       else
> +               return IRQ_NONE;
> +}
> +
> +static int pl330_probe(struct platform_device *pdev)
> +{
> +       struct s3c_pl330_dmac *s3c_pl330_dmac;
> +       struct s3c_pl330_platdata *pl330pd;
> +       struct pl330_info *pl330_info;
> +       struct resource *res;
> +       int i, ret, irq;
> +
> +       pl330pd = pdev->dev.platform_data;
> +
> +       /* Can't do without the list of _32_ peripherals */
> +       if (!pl330pd || !pl330pd->peri) {
> +               dev_err(&pdev->dev, "platform data missing!\n");
> +               return -ENODEV;
> +       }
> +
> +       pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
> +       if (!pl330_info)
> +               return -ENOMEM;
> +
> +       pl330_info->pl330_data = NULL;
> +       pl330_info->dev = &pdev->dev;
> +
> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +       if (!res) {
> +               ret = -ENODEV;
> +               goto probe_err1;
> +       }
> +
> +       request_mem_region(res->start, resource_size(res), pdev->name);
> +
> +       pl330_info->base = ioremap(res->start, resource_size(res));
> +       if (!pl330_info->base) {
> +               ret = -ENXIO;
> +               goto probe_err2;
> +       }
> +
> +       irq = platform_get_irq(pdev, 0);
> +       if (irq < 0) {
> +               ret = irq;
> +               goto probe_err3;
> +       }
> +
> +       ret = request_irq(irq, pl330_irq_handler, 0,
> +                       dev_name(&pdev->dev), pl330_info);
> +       if (ret)
> +               goto probe_err4;
> +
> +       /* Allocate a new DMAC */
> +       s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
> +       if (!s3c_pl330_dmac) {
> +               ret = -ENOMEM;
> +               goto probe_err5;
> +       }
> +
> +       /* Get operation clock and enable it */
> +       s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
> +       if (IS_ERR(s3c_pl330_dmac->clk)) {
> +               dev_err(&pdev->dev, "Cannot get operation clock.\n");
> +               ret = -EINVAL;
> +               goto probe_err6;
> +       }
> +       clk_enable(s3c_pl330_dmac->clk);
> +
> +       ret = pl330_add(pl330_info);
> +       if (ret)
> +               goto probe_err7;
> +
> +       /* Hook the info */
> +       s3c_pl330_dmac->pi = pl330_info;
> +
> +       /* No busy channels */
> +       s3c_pl330_dmac->busy_chan = 0;
> +
> +       s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
> +                               sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
> +
> +       if (!s3c_pl330_dmac->kmcache) {
> +               ret = -ENOMEM;
> +               goto probe_err8;
> +       }
> +
> +       /* Get the list of peripherals */
> +       s3c_pl330_dmac->peri = pl330pd->peri;
> +
> +       /* Attach to the list of DMACs */
> +       list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
> +
> +       /* Create a channel for each peripheral in the DMAC
> +        * that is, if it doesn't already exist
> +        */
> +       for (i = 0; i < PL330_MAX_PERI; i++)
> +               if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
> +                       chan_add(s3c_pl330_dmac->peri[i]);
> +
> +       printk(KERN_INFO
> +               "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
> +       printk(KERN_INFO
> +               "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
> +               pl330_info->pcfg.data_buf_dep,
> +               pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
> +               pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
> +
> +       return 0;
> +
> +probe_err8:
> +       pl330_del(pl330_info);
> +probe_err7:
> +       clk_disable(s3c_pl330_dmac->clk);
> +       clk_put(s3c_pl330_dmac->clk);
> +probe_err6:
> +       kfree(s3c_pl330_dmac);
> +probe_err5:
> +       free_irq(irq, pl330_info);
> +probe_err4:
> +probe_err3:
> +       iounmap(pl330_info->base);
> +probe_err2:
> +       release_mem_region(res->start, resource_size(res));
> +probe_err1:
> +       kfree(pl330_info);
> +
> +       return ret;
> +}
> +
> +static int pl330_remove(struct platform_device *pdev)
> +{
> +       struct s3c_pl330_dmac *dmac, *d;
> +       struct s3c_pl330_chan *ch;
> +       unsigned long flags;
> +       int del, found;
> +
> +       if (!pdev->dev.platform_data)
> +               return -EINVAL;
> +
> +       spin_lock_irqsave(&res_lock, flags);
> +
> +       found = 0;
> +       list_for_each_entry(d, &dmac_list, node)
> +               if (d->pi->dev == &pdev->dev) {
> +                       found = 1;
> +                       break;
> +               }
> +
> +       if (!found) {
> +               spin_unlock_irqrestore(&res_lock, flags);
> +               return 0;
> +       }
> +
> +       dmac = d;
> +
> +       /* Remove all Channels that are managed only by this DMAC */
> +       list_for_each_entry(ch, &chan_list, node) {
> +
> +               /* Only channels that are handled by this DMAC */
> +               if (iface_of_dmac(dmac, ch->id))
> +                       del = 1;
> +               else
> +                       continue;
> +
> +               /* Don't remove if some other DMAC has it too */
> +               list_for_each_entry(d, &dmac_list, node)
> +                       if (d != dmac && iface_of_dmac(d, ch->id)) {
> +                               del = 0;
> +                               break;
> +                       }
> +
> +               if (del) {
> +                       spin_unlock_irqrestore(&res_lock, flags);
> +                       s3c2410_dma_free(ch->id, ch->client);
> +                       spin_lock_irqsave(&res_lock, flags);
> +                       list_del(&ch->node);
> +                       kfree(ch);
> +               }
> +       }
> +
> +       /* Disable operation clock */
> +       clk_disable(dmac->clk);
> +       clk_put(dmac->clk);
> +
> +       /* Remove the DMAC */
> +       list_del(&dmac->node);
> +       kfree(dmac);
> +
> +       spin_unlock_irqrestore(&res_lock, flags);
> +
> +       return 0;
> +}
> +
> +static struct platform_driver pl330_driver = {
> +       .driver         = {
> +               .owner  = THIS_MODULE,
> +               .name   = "s3c-pl330",
> +       },
> +       .probe          = pl330_probe,
> +       .remove         = pl330_remove,
> +};
> +
> +static int __init pl330_init(void)
> +{
> +       return platform_driver_register(&pl330_driver);
> +}
> +module_init(pl330_init);
> +
> +static void __exit pl330_exit(void)
> +{
> +       platform_driver_unregister(&pl330_driver);
> +       return;
> +}
> +module_exit(pl330_exit);
> +
> +MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
> +MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
> +MODULE_LICENSE("GPL");
> --
> 1.7.2.3
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-samsung-soc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>
Jassi Brar June 7, 2011, 8:01 a.m. UTC | #2
On Tue, Jun 7, 2011 at 1:18 PM, root <alim.akhtar@samsung.com> wrote:
> Signed-off-by: alim.akhtar <alim.akhtar@samsung.com>
> ---
>  arch/arm/configs/exynos4_defconfig |    1 +
>  arch/arm/configs/s5p64x0_defconfig |    1 +
>  arch/arm/configs/s5pc100_defconfig |    1 +
>  arch/arm/configs/s5pv210_defconfig |    1 +
>  arch/arm/plat-samsung/Kconfig      |    6 -
>  arch/arm/plat-samsung/Makefile     |    2 -
>  arch/arm/plat-samsung/s3c-pl330.c  | 1244 ------------------------------------
>  drivers/dma/Kconfig                |    8 +
>  drivers/dma/Makefile               |    2 +
>  drivers/dma/s3c-pl330.c            | 1244 ++++++++++++++++++++++++++++++++++++
>  10 files changed, 1258 insertions(+), 1252 deletions(-)
>  delete mode 100644 arch/arm/plat-samsung/s3c-pl330.c
>  create mode 100644 drivers/dma/s3c-pl330.c

NAK
The drivers/dma/ is place for generic DMA API con formant drivers.
S3C-PL330.c is implementation of Samsung's DMA API.
Jassi Brar June 7, 2011, 8:09 a.m. UTC | #3
On Tue, Jun 7, 2011 at 1:30 PM, Kyungmin Park <kmpark@infradead.org> wrote:
>
> As I know there's are PL330 DMA implementation by
> MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
Thanks for CC'ing me, though my current contact is
jaswinder.singh@linaro.org or jassisinghbrar@gmail.com
Perhaps I need to change the email in all locations.

> Doesn't it better to use the generic PL330 instead of Samsung specific
> PL330 implementation?
Unfortunately, no. There are some features of Samsung's DMA API that
the drivers have come to rely upon.
Besides, I am not particularly a fan of the generic API. And IIRC,
neither is Ben Dooks, the designer of the
Samsung's DMA API.

> As I remember Jassi has a plan to use generic one?
No plans, sorry.

thnx
-j
Russell King - ARM Linux June 7, 2011, 8:15 a.m. UTC | #4
On Tue, Jun 07, 2011 at 01:39:43PM +0530, Jassi Brar wrote:
> Unfortunately, no. There are some features of Samsung's DMA API that
> the drivers have come to rely upon.
> Besides, I am not particularly a fan of the generic API. And IIRC,
> neither is Ben Dooks, the designer of the
> Samsung's DMA API.

We are now at the point where this is non-optional.  If the generic API
doesn't fit what Samsung needs, then that needs to be discussed and
whatever problems there are need to be resolved.

Continuing the platform specific DMA APIs is not sustainable.

I'm getting rather fed up with Samsung needing custom this and that, and
refusing to use the generic infrastructure such as clkdev.  This attitude
needs to change NOW.

Fix your platform specific crap and start using the generic services.
Kyungmin Park June 7, 2011, 8:35 a.m. UTC | #5
On Tue, Jun 7, 2011 at 5:15 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
> On Tue, Jun 07, 2011 at 01:39:43PM +0530, Jassi Brar wrote:
>> Unfortunately, no. There are some features of Samsung's DMA API that
>> the drivers have come to rely upon.
>> Besides, I am not particularly a fan of the generic API. And IIRC,
>> neither is Ben Dooks, the designer of the
>> Samsung's DMA API.
>
> We are now at the point where this is non-optional.  If the generic API
> doesn't fit what Samsung needs, then that needs to be discussed and
> whatever problems there are need to be resolved.
>
> Continuing the platform specific DMA APIs is not sustainable.
>
> I'm getting rather fed up with Samsung needing custom this and that, and
> refusing to use the generic infrastructure such as clkdev.  This attitude
> needs to change NOW.
>
> Fix your platform specific crap and start using the generic services.
>

I think Jassi is the right person to handle this one even though you
don't like the generic DMA APIs

1) since you become a member of linaro. it's role of linaro.
> 2) knows how to use the s3c-pl330 in sound (most users of DMA are
> sound, SPI and MMC)
3) now you're the Samsung Sound maintainer

> If we move to the generic DMA APIs, we also need to modify its
> consumers: sound and SPI.

Thank you,
Kyungmin Park
Vinod Koul June 7, 2011, 10:05 a.m. UTC | #6
On Tue, 2011-06-07 at 15:45 +0530, Linus Walleij wrote:
> On Tue, Jun 7, 2011 at 10:09 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
> 
> > There are some features of Samsung's DMA API that
> > the drivers have come to rely upon.
> 
> Can we get an overview of what these features are?
> 
> When I look at this specific code I cannot see anything that
> the dmaengine does not already support or can be made to
> support easily.
> 
> s3c2410_dma_getposition => device_tx_status()
> 
> s3c2410_dma_devconfig - not even needed with dmaengine
>  semantics, we already know the directions for any transfer.
> 
> s3c2410_dma_setflags => device_control() if it needs to be
>   runtime, else platform data. device_control() is prepared to
>   be extended for any custom weirdness flags if need be.
> 
> s3c2410_dma_set_buffdone_fn - no clue what this is, help
>   us. If related to cyclic transfer as one could guess, we
>   already have an API for that.
> 
> s3c2410_dma_ctrl => device_control() - there are two
>   weird controls named TIMEOUT and FLUSH, TIMEOUT
>   seem strange semantically for a ctrl* function, it seems like
>   something that should come from the hardware, and flush
>   is maybe an applicable extension, though I think any DMA
>   engine with a running channel will indeed flush anything
>   as quick as it can, if related to circular transfers we can
>   handle that.
> 
> The rest seems to be about retrieving channels, putting them
> back on the heap and allocating resources for them as we
> already do in dmaengine.
> 
> > I am not particularly a fan of the generic API. And IIRC,
> > neither is Ben Dooks, the designer of the
> > Samsung's DMA API.
> 
> This is not soccer/football/cricket. ;-)
> 
> Seriously, if there are any longstanding issues with the API we
> need them nailed down, please help!
Looking back at the archives two main things seems to be of concern [1]
1) need of circular API
2) being able to do submit(issue_pending) in callback
Both of them are already available and people using them.

If you have any more requirement, lets discuss, there are drivers in
sound spi and mmc which use dmaengine. The API supports a generic
implementation as well as passing custom parameters, so not sure why it
wont work.
Jassi Brar June 7, 2011, 10:15 a.m. UTC | #7
On Tue, Jun 7, 2011 at 1:45 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
> On Tue, Jun 07, 2011 at 01:39:43PM +0530, Jassi Brar wrote:
>> Unfortunately, no. There are some features of Samsung's DMA API that
>> the drivers have come to rely upon.
>> Besides, I am not particularly a fan of the generic API. And IIRC,
>> neither is Ben Dooks, the designer of the
>> Samsung's DMA API.
>
> We are now at the point where this is non-optional.  If the generic API
> doesn't fit what Samsung needs, then that needs to be discussed and
> whatever problems there are need to be resolved.
The discussion did take off a few months ago, but we didn't reach anywhere.
Being able to queue request from the 'done' callback, the need of
circular buffer
API (possibly a free-running one too) and callbacks in irq-context, as
they happen,
were a few requirements for having fast peripherals with shallow fifo
work without underruns.
Linus Walleij June 7, 2011, 10:15 a.m. UTC | #8
On Tue, Jun 7, 2011 at 10:09 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:

> There are some features of Samsung's DMA API that
> the drivers have come to rely upon.

Can we get an overview of what these features are?

When I look at this specific code I cannot see anything that
the dmaengine does not already support or can be made to
support easily.

s3c2410_dma_getposition => device_tx_status()

s3c2410_dma_devconfig - not even needed with dmaengine
 semantics, we already know the directions for any transfer.

s3c2410_dma_setflags => device_control() if it needs to be
  runtime, else platform data. device_control() is prepared to
  be extended for any custom weirdness flags if need be.

s3c2410_dma_set_buffdone_fn - no clue what this is, help
  us. If related to cyclic transfer as one could guess, we
  already have an API for that.

s3c2410_dma_ctrl => device_control() - there are two
  weird controls named TIMEOUT and FLUSH, TIMEOUT
  seem strange semantically for a ctrl* function, it seems like
  something that should come from the hardware, and flush
  is maybe an applicable extension, though I think any DMA
  engine with a running channel will indeed flush anything
  as quick as it can, if related to circular transfers we can
  handle that.

The rest seems to be about retrieving channels, putting them
back on the heap and allocating resources for them as we
already do in dmaengine.

> I am not particularly a fan of the generic API. And IIRC,
> neither is Ben Dooks, the designer of the
> Samsung's DMA API.

This is not soccer/football/cricket. ;-)

Seriously, if there are any longstanding issues with the API we
need them nailed down, please help!

Thanks,
Linus Walleij
Tushar Behera June 7, 2011, 3:42 p.m. UTC | #9
Hi Alim,

On 7 June 2011 13:18, root <alim.akhtar@samsung.com> wrote:
> Signed-off-by: alim.akhtar <alim.akhtar@samsung.com>
> ---
>  arch/arm/configs/exynos4_defconfig |    1 +
>  arch/arm/configs/s5p64x0_defconfig |    1 +
>  arch/arm/configs/s5pc100_defconfig |    1 +
>  arch/arm/configs/s5pv210_defconfig |    1 +
>  arch/arm/plat-samsung/Kconfig      |    6 -
>  arch/arm/plat-samsung/Makefile     |    2 -
>  arch/arm/plat-samsung/s3c-pl330.c  | 1244 ------------------------------------
>  drivers/dma/Kconfig                |    8 +
>  drivers/dma/Makefile               |    2 +
>  drivers/dma/s3c-pl330.c            | 1244 ++++++++++++++++++++++++++++++++++++
>  10 files changed, 1258 insertions(+), 1252 deletions(-)
>  delete mode 100644 arch/arm/plat-samsung/s3c-pl330.c
>  create mode 100644 drivers/dma/s3c-pl330.c
>
If the patch involves file movement or file renaming, then use -M flag
while creating the patch. This would indicate the file movement and
would essentially make the patch smaller.
Russell King - ARM Linux June 7, 2011, 6:29 p.m. UTC | #10
On Tue, Jun 07, 2011 at 03:45:18PM +0530, Jassi Brar wrote:
> On Tue, Jun 7, 2011 at 1:45 PM, Russell King - ARM Linux
> <linux@arm.linux.org.uk> wrote:
> > On Tue, Jun 07, 2011 at 01:39:43PM +0530, Jassi Brar wrote:
> >> Unfortunately, no. There are some features of Samsung's DMA API that
> >> the drivers have come to rely upon.
> >> Besides, I am not particularly a fan of the generic API. And IIRC,
> >> neither is Ben Dooks, the designer of the
> >> Samsung's DMA API.
> >
> > We are now at the point where this is non-optional.  If the generic API
> > doesn't fit what Samsung needs, then that needs to be discussed and
> > whatever problems there are need to be resolved.
> The discussion did take off a few months ago, but we didn't reach anywhere.
> Being able to queue requests from the 'done' callback, the need of
> circular buffer
> API (possibly a free-running one too) and callbacks in irq-context, as
> they happen,
> were a few requirements for having fast peripherals with shallow fifo
> work without underruns.

I can see why you have these concerns; the problem is the slave DMA
engine API was never properly documented.

1. The slave API does permit the done callback to submit new requests
   already (and any DMA engine driver which doesn't allow that is broken.)

Note that slave APIs _should_ permit several requests to be queued up
and as each finish, the next one should be started.  In other words,
once DMA has started it should continue until there is no more work for
the DMA engine to perform.

2. Circular buffer support has been added - see device_prep_dma_cyclic().

However, 2 is not really a requirement for audio - you can queue several
single slave transfers (one per period) initially, and then you get
callbacks as each transfer completes.  In the callback, you can submit
an additional buffer, and continue doing so causing DMA to never end.

I believe that this is a saner approach than the circular buffer support,
and its what I tried to put together for the AMBA PL041 AACI DMA (but
unfortunately, ARMs platforms are totally broken when it comes to DMA.)

This also removes the need for the callback to be in IRQ context.

So I don't see that anything you've mentioned is a problem with the API
as it stands today - there may be issues with the way the DMA engine
driver has been implemented which cause it not to conform to what I've
said above, but those are driver bugs and not a fault of the API.
Mark Brown June 7, 2011, 6:43 p.m. UTC | #11
On Tue, Jun 07, 2011 at 07:29:23PM +0100, Russell King - ARM Linux wrote:

> 2. Circular buffer support has been added - see device_prep_dma_cyclic().

> However, 2 is not really a requirement for audio - you can queue several
> single slave transfers (one per period) initially, and then you get
> callbacks as each transfer completes.  In the callback, you can submit
> an additional buffer, and continue doing so causing DMA to never end.

> I believe that this is a saner approach than the circular buffer support,
> and its what I tried to put together for the AMBA PL041 AACI DMA (but
> unfortunately, ARMs platforms are totally broken when it comes to DMA.)

Circular buffers are nice from the point of view of allowing you to
(providing the hardware supports it) totally disable the periodic audio
interrupts and leave the system to run for very long times off the
normal system timers.  This gives a small but non-zero power benefit
providing the hardware gives you enough information about where the DMA
is so you can find out if you need to mix in a notification, otherwise
you get obvious latency issues.

You can also do this with a circular chain of sequential buffers of
course.

> 
> This also removes the need for the callback to be in IRQ context.
> 
> So I don't see that anything you've mentioned is a problem with the API
> as it stands today - there may be issues with the way the DMA engine
> driver has been implemented which cause it not to conform to what I've
> said above, but those are driver bugs and not a fault of the API.
> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
>
Jassi Brar June 7, 2011, 7:01 p.m. UTC | #12
On Wed, Jun 8, 2011 at 12:13 AM, Mark Brown
<broonie@opensource.wolfsonmicro.com> wrote:
> On Tue, Jun 07, 2011 at 07:29:23PM +0100, Russell King - ARM Linux wrote:
>
>> 2. Circular buffer support has been added - see device_prep_dma_cyclic().
>
>> However, 2 is not really a requirement for audio - you can queue several
>> single slave transfers (one per period) initially, and then you get
>> callbacks as each transfer completes.  In the callback, you can submit
>> an additional buffer, and continue doing so causing DMA to never end.
>
>> I believe that this is a saner approach than the circular buffer support,
>> and its what I tried to put together for the AMBA PL041 AACI DMA (but
>> unfortunately, ARMs platforms are totally broken when it comes to DMA.)
>
> Circular buffers are nice from the point of view of allowing you to
> (providing the hardware supports it) totally disable the periodic audio
> interrupts and leave the system to run for very long times off the
> normal system timers.  This gives a small but non-zero power benefit
> providing the hardware gives you enough information about where the DMA
> is so you can find out if you need to mix in a notification, otherwise
> you get obvious latency issues.

This is what I called free-running circular buffer.
Besides power saving scenario, it is necessary for a fast peripheral
with shallow fifo.
The peripheral throws underrun errors if the dma h/w doesn't support
LLI and the cpu takes a bit too long loading and triggering the next
transfer on DMA due to irq-latency for some reason.


> You can also do this with an circular chain of sequential buffers of
> course.
This is what is called Circular buffer in Samsung's DMA API.
Jassi Brar June 7, 2011, 7:46 p.m. UTC | #13
On Tue, Jun 7, 2011 at 11:59 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
>> The discussion did take off a few months ago, but we didn't reach anywhere.
>> Being able to queue request from the 'done' callback, the need of
>> circular buffer
>> API (possibly a free-running one too) and callbacks in irq-context, as
>> they happen,
>> were a few requirements for having fast peripherals with shallow fifo
>> work without underruns.
>
> I can see why you have these concerns; the problem is the slave DMA
> engine API was never properly documented.
>
> 1. The slave API does permit the done callback to submit new requests
>   already (and any DMA engine driver which doesn't allow that is broken.)
>
> Note that slave APIs _should_ permit several requests to be queued up
> and as each finish, the next one should be started.  In other words,
> once DMA has started it should continue until there is no more work for
> the DMA engine to perform.
>
> 2. Circular buffer support has been added - see device_prep_dma_cyclic().
>
> However, 2 is not really a requirement for audio - you can queue several
> single slave transfers (one per period) initially, and then you get
> callbacks as each transfer completes.  In the callback, you can submit
> an additional buffer, and continue doing so causing DMA to never end.
>
> I believe that this is a saner approach than the circular buffer support,
> and its what I tried to put together for the AMBA PL041 AACI DMA (but
> unfortunately, ARMs platforms are totally broken when it comes to DMA.)
>
> This also removes the need for the callback to be in IRQ context.
>
> So I don't see that anything you've mentioned is a problem with the API
> as it stands today - there may be issues with the way the DMA engine
> driver has been implemented which cause it not to conform to what I've
> said above, but those are driver bugs and not a fault of the API.

Yes, after reading Vinod's mail, I did see that CYCLIC option has been added.
Though I am still not sure how effective would that be for fast
peripherals(please
have a look at my reply to Mark's post), when doing callbacks from
tasklets scheduled
from irq-handlers is the norm with dma drivers of generic api.
Playing regular audio is no problem, but playing pro quality over
SPDIF with active multimedia h/w, sometimes is for some devices.

Besides, people might have different work priorities atm. I don't
think anybody believes we can do without common APIs.

Btw, Samsung DMA API doesn't support the 'free-running' circular
buffer either. But that was my planned TODO while I was there and I
think someone is working on it(?)

IMHO Samsung SoC team (not his majesty Mr Kyungmin Park) are justified if they
are not so eager right now. Since I no more would have to spend
sleepless nights over
SPDIF underruns, I can side with what Kukjin Kim(Samsung maintainer)
decides, who has to live with the results.

Thanks,
Jassi
Mark Brown June 7, 2011, 9:41 p.m. UTC | #14
On Wed, Jun 08, 2011 at 12:31:42AM +0530, Jassi Brar wrote:
> On Wed, Jun 8, 2011 at 12:13 AM, Mark Brown

> > Circular buffers are nice from the point of view of allowing you to
> > (providing the hardware supports it) totally disable the periodic audio
> > interrupts and leave the system to run for very long times off the

> This is what I called free-running circular buffer.
> Besides power saving scenario, it is necessary for a fast peripheral
> with shallow fifo.
> The peripheral throws underrun errors, if the dma h/w doesn't support
> LLI and cpu takes
> a bit long loading-triggering the next transfer on DMA due to
> irq-latency for some reason.

That's fairly unusual, though - usually DMA controllers seem to support
chaining requests before they support circular operation, at which point
unless the hardware is badly misdone you can just chain another buffer,
giving that buffer's worth of time for the CPU to respond.

> > You can also do this with an circular chain of sequential buffers of
> > course.

> This is what is called Circular buffer in Samsung's DMA API.

Which is a little bit unusual as it's basically a pure software
construct rather than a hardware feature.
Russell King - ARM Linux June 7, 2011, 10:28 p.m. UTC | #15
On Wed, Jun 08, 2011 at 12:31:42AM +0530, Jassi Brar wrote:
> On Wed, Jun 8, 2011 at 12:13 AM, Mark Brown
> <broonie@opensource.wolfsonmicro.com> wrote:
> > On Tue, Jun 07, 2011 at 07:29:23PM +0100, Russell King - ARM Linux wrote:
> >
> >> 2. Circular buffer support has been added - see device_prep_dma_cyclic().
> >
> >> However, 2 is not really a requirement for audio - you can queue several
> >> single slave transfers (one per period) initially, and then you get
> >> callbacks as each transfer completes.  In the callback, you can submit
> >> an additional buffer, and continue doing so causing DMA to never end.
> >
> >> I believe that this is a saner approach than the circular buffer support,
> >> and its what I tried to put together for the AMBA PL041 AACI DMA (but
> >> unfortunately, ARMs platforms are totally broken when it comes to DMA.)
> >
> > Circular buffers are nice from the point of view of allowing you to
> > (providing the hardware supports it) totally disable the periodic audio
> > interrupts and leave the system to run for very long times off the
> > normal system timers.  This gives a small but non-zero power benefit
> > providing the hardware gives you enough information about where the DMA
> > is so you can find out if you need to mix in a notification, otherwise
> > you get obvious latency issues.
> 
> This is what I called free-running circular buffer.
> Besides power saving scenario, it is necessary for a fast peripheral
> with shallow fifo.

Please stop perpetuating this myth.  It is not necessary for fast
peripherals with shallow fifos.

What is necessary for such peripherals is to have a large enough pending
DMA queue already in place that you don't encounter underrun errors.
That means chaining up several sequential smaller buffers so that you
can replenish the queue before the underrun occurs.

Eg, if you have eight 8K buffers, you submit 7 of the 8K buffers when
you start having filled those 7 with data.  You prepare the 8th and
when the 1st buffer completes, you submit the 8th buffer and start
re-filling the 1st buffer.  Then, when the 2nd buffer completes, you
re-submit the 1st buffer and start filling the 2nd buffer.  etc.

This means you always have at least 6 buffers of 8K available for the
peripheral to consume.

If you get to the end of all the pending buffers before you can service
the DMA interrupt, then you don't have the data in place to continue to
feed the peripheral, so DMA will stop and _correctly_ you will get an
underrun.  Hint: no more data prepared _is_ the underrun condition so
it is only right that DMA should stop at that point.

> The peripheral throws underrun errors, if the dma h/w doesn't support
> LLI and cpu takes
> a bit long loading-triggering the next transfer on DMA due to
> irq-latency for some reason.

There is no difference between moving to the next buffer in a chain of
buffers and having to re-load the DMA hardware to simulate a real
circular DMA buffer.

The only difference would be if the hardware provides you with support
for circular buffers itself, but it would also need some way of generating
an interrupt every X bytes transferred to support the requirements of
ALSA, or some other way to track progress.
Russell King - ARM Linux June 7, 2011, 10:36 p.m. UTC | #16
On Wed, Jun 08, 2011 at 01:16:40AM +0530, Jassi Brar wrote:
> IMHO Samsung SoC team (not his majesty Mr Kyungmin Park) are justified
> if they are not so eager right now. Since I no more would have to spend
> sleepless nights over
> SPDIF underruns, I can side with what Kukjin Kim(Samsung maintainer)
> decides, who has to live with the results.

Well, the question is _when_ will these things happen in the Samsung
world?

I see no effort from the Samsung folk to even start considering moving
to common APIs - I see precisely the opposite.  They seem to have a
strong desire to invent their own new APIs all the time rather than
look at existing APIs and discuss how they can be used or adapted so
they work for Samsung.

That is not sustainable, and if it continues, it will probably result
in Samsung stuff being chucked out of mainline.  We are most definitely
at the point where custom APIs are no longer permissible, especially
for any new SoCs.
Jassi Brar June 8, 2011, 2:51 a.m. UTC | #17
On Wed, Jun 8, 2011 at 3:11 AM, Mark Brown
<broonie@opensource.wolfsonmicro.com> wrote:
> On Wed, Jun 08, 2011 at 12:31:42AM +0530, Jassi Brar wrote:
>> On Wed, Jun 8, 2011 at 12:13 AM, Mark Brown
>
>> > Circular buffers are nice from the point of view of allowing you to
>> > (providing the hardware supports it) totally disable the periodic audio
>> > interrupts and leave the system to run for very long times off the
>
>> This is what I called free-running circular buffer.
>> Besides power saving scenario, it is necessary for a fast peripheral
>> with shallow fifo.
>> The peripheral throws underrun errors, if the dma h/w doesn't support
>> LLI and cpu takes
>> a bit long loading-triggering the next transfer on DMA due to
>> irq-latency for some reason.
>
> That's fairly unusual, though - usually DMA controllers seem to support
> chaining requests before they support circular operation, at which point
> unless the hardware is badly misdone you can just chain another buffer,
> giving that buffer's worth of time for the CPU to respond.

While writing PL330 driver, I wasn't able to figure out a way to implement LLI.
Please have a look at the PL330 trm and suggest if we can implement it in a
generic way without considering them special requests.
I know pl080 supports LLI and it's fine.


>> > You can also do this with an circular chain of sequential buffers of
>> > course.
>
>> This is what is called Circular buffer in Samsung's DMA API.
>
> Which is a little bit unusual as it's basically a pure software
> construct rather than a hardware feature.
Yes it is. And I didn't say it suffices.
Jassi Brar June 8, 2011, 4:05 a.m. UTC | #18
On Wed, Jun 8, 2011 at 3:58 AM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
>> >
>> >> 2. Circular buffer support has been added - see device_prep_dma_cyclic().
>> >
>> >> However, 2 is not really a requirement for audio - you can queue several
>> >> single slave transfers (one per period) initially, and then you get
>> >> callbacks as each transfer completes.  In the callback, you can submit
>> >> an additional buffer, and continue doing so causing DMA to never end.
>> >
>> >> I believe that this is a saner approach than the circular buffer support,
>> >> and its what I tried to put together for the AMBA PL041 AACI DMA (but
>> >> unfortunately, ARMs platforms are totally broken when it comes to DMA.)
>> >
>> > Circular buffers are nice from the point of view of allowing you to
>> > (providing the hardware supports it) totally disable the periodic audio
>> > interrupts and leave the system to run for very long times off the
>> > normal system timers.  This gives a small but non-zero power benefit
>> > providing the hardware gives you enough information about where the DMA
>> > is so you can find out if you need to mix in a notification, otherwise
>> > you get obvious latency issues.
>>
>> This is what I called free-running circular buffer.
>> Besides power saving scenario, it is necessary for a fast peripheral
>> with shallow fifo.
>
> Please stop perpetuating this myth.  It is not necessary for fast
> peripherals with shallow fifos.

I would beg you to please spend some time understanding what exactly I say.
More so because I am not very good at communicating.


> What is necessary for such peripherals is to have a large enough pending
> DMA queue already in place that you don't encounter underrun errors.
> That means chaining up several sequential smaller buffers so that you
> can replenish the queue before the underrun occurs.
>
> Eg, if you have eight 8K buffers, you submit 7 of the 8K buffers when
> you start having filled those 7 with data.  You prepare the 8th and
> when the 1st buffer completes, you submit the 8th buffer and start
> re-filling the 1st buffer.  Then, when the 2nd buffer completes, you
> re-submit the 1st buffer and start filling the 2nd buffer.  etc.

In short, a simple ALSA ring buffer ?


> If you get to the end of all the pending buffers before you can service
> the DMA interrupt, then you don't have the data in place to continue to
> feed the peripheral, so DMA will stop and _correctly_ you will get an
> underrun.  Hint: no more data prepared _is_ the underrun condition so
> it is only right that DMA should stop at that point.

Of course. And I am not complaining about those s/w reported underruns/overruns.
BTW, instead of 7/8 we could also set the threshold in ALSA drivers to
require apps
to fill it 8/8 before triggering and that would keep the buffer filled
to the brim.


>> The peripheral throws underrun errors, if the dma h/w doesn't support
>> LLI and cpu takes
>> a bit long loading-triggering the next transfer on DMA due to
>> irq-latency for some reason.
>
> There is no difference between moving to the next buffer in a chain of
> buffers and having to re-load the DMA hardware to simulate a real
> circular DMA buffer.

There is difference as explained below.


> The only difference would be if the hardware provides you with support
> for circular buffers itself, but it would also need some way of generating
> an interrupt every X bytes transferred to support the requirements of
> ALSA, or some other way to track progress.

I am afraid not so. Ex, PL080 provides an LLI mechanism using which one can
have true circular buffer behaviour and yet get updates/irqs from the PL080.

Let me try to elaborate the difference ...

* In h/w supported Linked-List-Item(LLI), the DMA finishes one
transfer, triggers an irq and then continues with
transferring the next linked transfer item.
 Please note the dma is active and peripheral fifos always keep
receiving/providing data while cpu services the irq.  H/w like PL080
provides this LLI mechanism readily.

* In s/w emulated LLI (DMA API driver maintaining circularly linked
transfer requests)  the dma finishes one
programmed transfer, triggers an irq and _stops_ transferring data;
the cpu then programs the DMA with the next item in the list and triggers
the DMA operation.
 Please note, the peripheral fifos don't get any data after dma
transfer completes and before cpu program and trigger the dma again.
In this case, a 'fast peripheral with shallow fifo' might run out of
data before the next DMA transfer begins. And some IPs consider the
state as erroneous.
In real life, I saw that with the Samsung's SPDIF controller which has
fifo depth only for a few samples and it gets very demanding if pro
quality is expected. Aggravate that with high irq-latency under system
load.
And in such cases, it is much more preferable to employ system timers
to generate period_elapsed updates and queue the whole ring buffer as
one transfer item to be iterated endlessly by dma. Ex, PL330 doesn't
lend to LLI but can be programmed endlessly looping one transfer item.

In short, I am talking about FIFO underruns/overruns _between_ two DMA
transfers.
I am not talking about ring buffer overruns/underruns.

Thanks,
Jassi
Russell King - ARM Linux June 8, 2011, 7:44 a.m. UTC | #19
On Wed, Jun 08, 2011 at 09:35:34AM +0530, Jassi Brar wrote:
> In real life, I saw that with the Samsung's SPDIF controller which has
> fifo depth only for a few samples and it gets very demanding if pro
> quality is expected. Aggravate that with high irq-latency under system
> load.
> And in such cases, it is much more preferable to employ system timers
> to generate period_elapsed updates and queue the whole ring buffer as
> one transfer item to be iterated endlessly by dma. Ex, PL330 doesn't
> lend to LLI but can be programmed endlessly looping one transfer item.

So you're talking about the _hardware_ circular buffer case which I
mentioned in my email.  No problem with the DMA engine API as has
already been said - it supports preparation of circular buffers.

So, from everything discussed so far, there's nothing lacking from
the DMA engine API for your purposes.
Mark Brown June 8, 2011, 8:55 a.m. UTC | #20
On Wed, Jun 08, 2011 at 08:21:08AM +0530, Jassi Brar wrote:
> On Wed, Jun 8, 2011 at 3:11 AM, Mark Brown

> > That's fairly unusual, though - usually DMA controllers seem to support
> > chaining requests before they support circular operation, at which point
> > unless the hardware is badly misdone you can just chain another buffer,
> > giving that buffer's worth of time for the CPU to respond.

> While writing PL330 driver, I wasn't able to figure out a way to implement LLI.
> Please have a look at the PL330 trm and suggest if we can implement it in a
> generic way without considering them special requests.
> I know pl080 supports LLI and it's fine.

I'm perfectly prepared to believe that there's poorly designed hardware
out there (either due to just poor design or deploying a controller in
an inappropriate application) that requires us to do things in software;
it's just relatively unusual as users tend to run into issues which
either can't be resolved or are excessively painful to resolve.
Kim Kukjin June 9, 2011, 6:24 p.m. UTC | #21
On 06/07/11 15:36, Russell King - ARM Linux wrote:
> I see no effort from the Samsung folk to even start considering moving
> to common APIs - I see precisely the opposite.  They seem to have a
> strong desire to invent their own new APIs all the time rather than
> look at existing APIs and discuss how they can be used or adapted so
> they work for Samsung.
>
> That is not sustainable, and if it continues, it will probably result
> in Samsung stuff being chucked out of mainline.  We are most definitely
> at the point where custom APIs are no longer permissible, especially
> for any new SoCs.

Hi Russell and everyone,

Sorry for late participation on this :(
I'm out of my office for biz. trip so it was hard to check my e-mail
and will be back to my country this weekend.

Anyway, I and my colleagues know what we have to do for Linux world.
And as I know, we are preparing some stuff for it so please don't
expect the worst :)

I should be back on this after discussing with my colleagues.

Thanks.

Best regards,
Kgene.
--
Kukjin Kim <kgene.kim@samsung.com>, Senior Engineer,
SW Solution Development Team, Samsung Electronics Co., Ltd.
Kyungmin Park June 16, 2011, 12:56 p.m. UTC | #22
2011/6/10 Kukjin Kim <kgene.kim@samsung.com>:
> On 06/07/11 15:36, Russell King - ARM Linux wrote:
>> I see no effort from the Samsung folk to even start considering moving
>> to common APIs - I see precisely the opposite.  They seem to have a
>> strong desire to invent their own new APIs all the time rather than
>> look at existing APIs and discuss how they can be used or adapted so
>> they work for Samsung.
>>
>> That is not sustainable, and if it continues, it will probably result
>> in Samsung stuff being chucked out of mainline.  We are most definitely
>> at the point where custom APIs are no longer permissible, especially
>> for any new SoCs.
>
> Hi Russell and everyone,
>
> Sorry for late participation on this :(
> I'm out of my office for biz. trip so it was hard to check my e-mail
> and will be back to my country this weekend.
>
> Anyway, I and my colleagues know what we have to do for Linux world.
> And as I know, we are preparing some stuff for it so please don't
> expect the worst :)
>
> I should be back on this after discussing with my colleagues.
Any updates? still need more time?
>
> Thanks.
>
> Best regards,
> Kgene.
> --
> Kukjin Kim <kgene.kim@samsung.com>, Senior Engineer,
> SW Solution Development Team, Samsung Electronics Co., Ltd.
>
Kim Kukjin June 23, 2011, 6:47 a.m. UTC | #23
kyungmin78@gmail.com wrote:
> 
> 2011/6/10 Kukjin Kim <kgene.kim@samsung.com>:
> > On 06/07/11 15:36, Russell King - ARM Linux wrote:
> >> I see no effort from the Samsung folk to even start considering moving
> >> to common APIs - I see precisely the opposite.  They seem to have a
> >> strong desire to invent their own new APIs all the time rather than
> >> look at existing APIs and discuss how they can be used or adapted so
> >> they work for Samsung.
> >>
> >> That is not sustainable, and if it continues, it will probably result
> >> in Samsung stuff being chucked out of mainline.  We are most definitely
> >> at the point where custom APIs are no longer permissible, especially
> >> for any new SoCs.
> >
> > Hi Russell and everyone,
> >
> > Sorry for late participation on this :(
> > I'm out of my office for biz. trip so it was hard to check my e-mail
> > and will be back to my country this weekend.
> >
> > Anyway, I and my colleagues know what we have to do for Linux world.
> > And as I know, we are preparing some stuff for it so please don't
> > expect the worst :)
> >
> > I should be back on this after discussing with my colleagues.
> Any updates? still need more time?

As a note, our engineers are working on this now.
If any updates about that, will post...

Thanks.

Best regards,
Kgene.
--
Kukjin Kim <kgene.kim@samsung.com>, Senior Engineer,
SW Solution Development Team, Samsung Electronics Co., Ltd.
diff mbox

Patch

diff --git a/arch/arm/configs/exynos4_defconfig b/arch/arm/configs/exynos4_defconfig
index da53ff3..6421074 100644
--- a/arch/arm/configs/exynos4_defconfig
+++ b/arch/arm/configs/exynos4_defconfig
@@ -37,6 +37,7 @@  CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
+CONFIG_DMADEVICES=y
 # CONFIG_HWMON is not set
 # CONFIG_MFD_SUPPORT is not set
 # CONFIG_HID_SUPPORT is not set
diff --git a/arch/arm/configs/s5p64x0_defconfig b/arch/arm/configs/s5p64x0_defconfig
index ad6b61b..9340ffc 100644
--- a/arch/arm/configs/s5p64x0_defconfig
+++ b/arch/arm/configs/s5p64x0_defconfig
@@ -31,6 +31,7 @@  CONFIG_SERIAL_8250_NR_UARTS=3
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_DMADEVICES=y
 # CONFIG_HWMON is not set
 CONFIG_DISPLAY_SUPPORT=y
 # CONFIG_VGA_CONSOLE is not set
diff --git a/arch/arm/configs/s5pc100_defconfig b/arch/arm/configs/s5pc100_defconfig
index 41bafc9..694ef97 100644
--- a/arch/arm/configs/s5pc100_defconfig
+++ b/arch/arm/configs/s5pc100_defconfig
@@ -20,6 +20,7 @@  CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_DMADEVICES=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_MMC=y
 CONFIG_MMC_DEBUG=y
diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
index fa98990..0013593 100644
--- a/arch/arm/configs/s5pv210_defconfig
+++ b/arch/arm/configs/s5pv210_defconfig
@@ -37,6 +37,7 @@  CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_DMADEVICES=y
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID_SUPPORT is not set
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 4d79519..9607ac4 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -294,12 +294,6 @@  config S3C_DMA
 	help
 	  Internal configuration for S3C DMA core
 
-config S3C_PL330_DMA
-	bool
-	select PL330
-	help
-	  S3C DMA API Driver for PL330 DMAC.
-
 comment "Power management"
 
 config SAMSUNG_PM_DEBUG
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 53eb15b..895c697 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -64,8 +64,6 @@  obj-$(CONFIG_SAMSUNG_DEV_PWM)	+= dev-pwm.o
 
 obj-$(CONFIG_S3C_DMA)		+= dma.o
 
-obj-$(CONFIG_S3C_PL330_DMA)	+= s3c-pl330.o
-
 # PM support
 
 obj-$(CONFIG_PM)		+= pm.o
diff --git a/arch/arm/plat-samsung/s3c-pl330.c b/arch/arm/plat-samsung/s3c-pl330.c
deleted file mode 100644
index f85638c..0000000
--- a/arch/arm/plat-samsung/s3c-pl330.c
+++ /dev/null
@@ -1,1244 +0,0 @@ 
-/* linux/arch/arm/plat-samsung/s3c-pl330.c
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- *	Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include <asm/hardware/pl330.h>
-
-#include <plat/s3c-pl330-pdata.h>
-
-/**
- * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
- * @busy_chan: Number of channels currently busy.
- * @peri: List of IDs of peripherals this DMAC can work with.
- * @node: To attach to the global list of DMACs.
- * @pi: PL330 configuration info for the DMAC.
- * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
- * @clk: Pointer of DMAC operation clock.
- */
-struct s3c_pl330_dmac {
-	unsigned		busy_chan;
-	enum dma_ch		*peri;
-	struct list_head	node;
-	struct pl330_info	*pi;
-	struct kmem_cache	*kmcache;
-	struct clk		*clk;
-};
-
-/**
- * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
- * @token: Xfer ID provided by the client.
- * @node: To attach to the list of xfers on a channel.
- * @px: Xfer for PL330 core.
- * @chan: Owner channel of this xfer.
- */
-struct s3c_pl330_xfer {
-	void			*token;
-	struct list_head	node;
-	struct pl330_xfer	px;
-	struct s3c_pl330_chan	*chan;
-};
-
-/**
- * struct s3c_pl330_chan - Logical channel to communicate with
- * 	a Physical peripheral.
- * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
- * 	NULL if the channel is available to be acquired.
- * @id: ID of the peripheral that this channel can communicate with.
- * @options: Options specified by the client.
- * @sdaddr: Address provided via s3c2410_dma_devconfig.
- * @node: To attach to the global list of channels.
- * @lrq: Pointer to the last submitted pl330_req to PL330 core.
- * @xfer_list: To manage list of xfers enqueued.
- * @req: Two requests to communicate with the PL330 engine.
- * @callback_fn: Callback function to the client.
- * @rqcfg: Channel configuration for the xfers.
- * @xfer_head: Pointer to the xfer to be next executed.
- * @dmac: Pointer to the DMAC that manages this channel, NULL if the
- * 	channel is available to be acquired.
- * @client: Client of this channel. NULL if the
- * 	channel is available to be acquired.
- */
-struct s3c_pl330_chan {
-	void				*pl330_chan_id;
-	enum dma_ch			id;
-	unsigned int			options;
-	unsigned long			sdaddr;
-	struct list_head		node;
-	struct pl330_req		*lrq;
-	struct list_head		xfer_list;
-	struct pl330_req		req[2];
-	s3c2410_dma_cbfn_t		callback_fn;
-	struct pl330_reqcfg		rqcfg;
-	struct s3c_pl330_xfer		*xfer_head;
-	struct s3c_pl330_dmac		*dmac;
-	struct s3c2410_dma_client	*client;
-};
-
-/* All DMACs in the platform */
-static LIST_HEAD(dmac_list);
-
-/* All channels to peripherals in the platform */
-static LIST_HEAD(chan_list);
-
-/*
- * Since we add resources(DMACs and Channels) to the global pool,
- * we need to guard access to the resources using a global lock
- */
-static DEFINE_SPINLOCK(res_lock);
-
-/* Returns the channel with ID 'id' in the chan_list */
-static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
-{
-	struct s3c_pl330_chan *ch;
-
-	list_for_each_entry(ch, &chan_list, node)
-		if (ch->id == id)
-			return ch;
-
-	return NULL;
-}
-
-/* Allocate a new channel with ID 'id' and add to chan_list */
-static void chan_add(const enum dma_ch id)
-{
-	struct s3c_pl330_chan *ch = id_to_chan(id);
-
-	/* Return if the channel already exists */
-	if (ch)
-		return;
-
-	ch = kmalloc(sizeof(*ch), GFP_KERNEL);
-	/* Return silently to work with other channels */
-	if (!ch)
-		return;
-
-	ch->id = id;
-	ch->dmac = NULL;
-
-	list_add_tail(&ch->node, &chan_list);
-}
-
-/* If the channel is not yet acquired by any client */
-static bool chan_free(struct s3c_pl330_chan *ch)
-{
-	if (!ch)
-		return false;
-
-	/* Channel points to some DMAC only when it's acquired */
-	return ch->dmac ? false : true;
-}
-
-/*
- * Returns 0 is peripheral i/f is invalid or not present on the dmac.
- * Index + 1, otherwise.
- */
-static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
-{
-	enum dma_ch *id = dmac->peri;
-	int i;
-
-	/* Discount invalid markers */
-	if (ch_id == DMACH_MAX)
-		return 0;
-
-	for (i = 0; i < PL330_MAX_PERI; i++)
-		if (id[i] == ch_id)
-			return i + 1;
-
-	return 0;
-}
-
-/* If all channel threads of the DMAC are busy */
-static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
-{
-	struct pl330_info *pi = dmac->pi;
-
-	return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
-}
-
-/*
- * Returns the number of free channels that
- * can be handled by this dmac only.
- */
-static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
-{
-	enum dma_ch *id = dmac->peri;
-	struct s3c_pl330_dmac *d;
-	struct s3c_pl330_chan *ch;
-	unsigned found, count = 0;
-	enum dma_ch p;
-	int i;
-
-	for (i = 0; i < PL330_MAX_PERI; i++) {
-		p = id[i];
-		ch = id_to_chan(p);
-
-		if (p == DMACH_MAX || !chan_free(ch))
-			continue;
-
-		found = 0;
-		list_for_each_entry(d, &dmac_list, node) {
-			if (d != dmac && iface_of_dmac(d, ch->id)) {
-				found = 1;
-				break;
-			}
-		}
-		if (!found)
-			count++;
-	}
-
-	return count;
-}
-
-/*
- * Measure of suitability of 'dmac' handling 'ch'
- *
- * 0 indicates 'dmac' can not handle 'ch' either
- * because it is not supported by the hardware or
- * because all dmac channels are currently busy.
- *
- * >0 vlaue indicates 'dmac' has the capability.
- * The bigger the value the more suitable the dmac.
- */
-#define MAX_SUIT	UINT_MAX
-#define MIN_SUIT	0
-
-static unsigned suitablility(struct s3c_pl330_dmac *dmac,
-		struct s3c_pl330_chan *ch)
-{
-	struct pl330_info *pi = dmac->pi;
-	enum dma_ch *id = dmac->peri;
-	struct s3c_pl330_dmac *d;
-	unsigned s;
-	int i;
-
-	s = MIN_SUIT;
-	/* If all the DMAC channel threads are busy */
-	if (dmac_busy(dmac))
-		return s;
-
-	for (i = 0; i < PL330_MAX_PERI; i++)
-		if (id[i] == ch->id)
-			break;
-
-	/* If the 'dmac' can't talk to 'ch' */
-	if (i == PL330_MAX_PERI)
-		return s;
-
-	s = MAX_SUIT;
-	list_for_each_entry(d, &dmac_list, node) {
-		/*
-		 * If some other dmac can talk to this
-		 * peri and has some channel free.
-		 */
-		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
-			s = 0;
-			break;
-		}
-	}
-	if (s)
-		return s;
-
-	s = 100;
-
-	/* Good if free chans are more, bad otherwise */
-	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
-
-	return s;
-}
-
-/* More than one DMAC may have capability to transfer data with the
- * peripheral. This function assigns most suitable DMAC to manage the
- * channel and hence communicate with the peripheral.
- */
-static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
-{
-	struct s3c_pl330_dmac *d, *dmac = NULL;
-	unsigned sn, sl = MIN_SUIT;
-
-	list_for_each_entry(d, &dmac_list, node) {
-		sn = suitablility(d, ch);
-
-		if (sn == MAX_SUIT)
-			return d;
-
-		if (sn > sl)
-			dmac = d;
-	}
-
-	return dmac;
-}
-
-/* Acquire the channel for peripheral 'id' */
-static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
-{
-	struct s3c_pl330_chan *ch = id_to_chan(id);
-	struct s3c_pl330_dmac *dmac;
-
-	/* If the channel doesn't exist or is already acquired */
-	if (!ch || !chan_free(ch)) {
-		ch = NULL;
-		goto acq_exit;
-	}
-
-	dmac = map_chan_to_dmac(ch);
-	/* If couldn't map */
-	if (!dmac) {
-		ch = NULL;
-		goto acq_exit;
-	}
-
-	dmac->busy_chan++;
-	ch->dmac = dmac;
-
-acq_exit:
-	return ch;
-}
-
-/* Delete xfer from the queue */
-static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
-{
-	struct s3c_pl330_xfer *t;
-	struct s3c_pl330_chan *ch;
-	int found;
-
-	if (!xfer)
-		return;
-
-	ch = xfer->chan;
-
-	/* Make sure xfer is in the queue */
-	found = 0;
-	list_for_each_entry(t, &ch->xfer_list, node)
-		if (t == xfer) {
-			found = 1;
-			break;
-		}
-
-	if (!found)
-		return;
-
-	/* If xfer is last entry in the queue */
-	if (xfer->node.next == &ch->xfer_list)
-		t = list_entry(ch->xfer_list.next,
-				struct s3c_pl330_xfer, node);
-	else
-		t = list_entry(xfer->node.next,
-				struct s3c_pl330_xfer, node);
-
-	/* If there was only one node left */
-	if (t == xfer)
-		ch->xfer_head = NULL;
-	else if (ch->xfer_head == xfer)
-		ch->xfer_head = t;
-
-	list_del(&xfer->node);
-}
-
-/* Provides pointer to the next xfer in the queue.
- * If CIRCULAR option is set, the list is left intact,
- * otherwise the xfer is removed from the list.
- * Forced delete 'pluck' can be set to override the CIRCULAR option.
- */
-static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
-		int pluck)
-{
-	struct s3c_pl330_xfer *xfer = ch->xfer_head;
-
-	if (!xfer)
-		return NULL;
-
-	/* If xfer is last entry in the queue */
-	if (xfer->node.next == &ch->xfer_list)
-		ch->xfer_head = list_entry(ch->xfer_list.next,
-					struct s3c_pl330_xfer, node);
-	else
-		ch->xfer_head = list_entry(xfer->node.next,
-					struct s3c_pl330_xfer, node);
-
-	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
-		del_from_queue(xfer);
-
-	return xfer;
-}
-
-static inline void add_to_queue(struct s3c_pl330_chan *ch,
-		struct s3c_pl330_xfer *xfer, int front)
-{
-	struct pl330_xfer *xt;
-
-	/* If queue empty */
-	if (ch->xfer_head == NULL)
-		ch->xfer_head = xfer;
-
-	xt = &ch->xfer_head->px;
-	/* If the head already submitted (CIRCULAR head) */
-	if (ch->options & S3C2410_DMAF_CIRCULAR &&
-		(xt == ch->req[0].x || xt == ch->req[1].x))
-		ch->xfer_head = xfer;
-
-	/* If this is a resubmission, it should go at the head */
-	if (front) {
-		ch->xfer_head = xfer;
-		list_add(&xfer->node, &ch->xfer_list);
-	} else {
-		list_add_tail(&xfer->node, &ch->xfer_list);
-	}
-}
-
-static inline void _finish_off(struct s3c_pl330_xfer *xfer,
-		enum s3c2410_dma_buffresult res, int ffree)
-{
-	struct s3c_pl330_chan *ch;
-
-	if (!xfer)
-		return;
-
-	ch = xfer->chan;
-
-	/* Do callback */
-	if (ch->callback_fn)
-		ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
-
-	/* Force Free or if buffer is not needed anymore */
-	if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
-		kmem_cache_free(ch->dmac->kmcache, xfer);
-}
-
-static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
-		struct pl330_req *r)
-{
-	struct s3c_pl330_xfer *xfer;
-	int ret = 0;
-
-	/* If already submitted */
-	if (r->x)
-		return 0;
-
-	xfer = get_from_queue(ch, 0);
-	if (xfer) {
-		r->x = &xfer->px;
-
-		/* Use max bandwidth for M<->M xfers */
-		if (r->rqtype == MEMTOMEM) {
-			struct pl330_info *pi = xfer->chan->dmac->pi;
-			int burst = 1 << ch->rqcfg.brst_size;
-			u32 bytes = r->x->bytes;
-			int bl;
-
-			bl = pi->pcfg.data_bus_width / 8;
-			bl *= pi->pcfg.data_buf_dep;
-			bl /= burst;
-
-			/* src/dst_burst_len can't be more than 16 */
-			if (bl > 16)
-				bl = 16;
-
-			while (bl > 1) {
-				if (!(bytes % (bl * burst)))
-					break;
-				bl--;
-			}
-
-			ch->rqcfg.brst_len = bl;
-		} else {
-			ch->rqcfg.brst_len = 1;
-		}
-
-		ret = pl330_submit_req(ch->pl330_chan_id, r);
-
-		/* If submission was successful */
-		if (!ret) {
-			ch->lrq = r; /* latest submitted req */
-			return 0;
-		}
-
-		r->x = NULL;
-
-		/* If both of the PL330 ping-pong buffers filled */
-		if (ret == -EAGAIN) {
-			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
-				__func__, __LINE__);
-			/* Queue back again */
-			add_to_queue(ch, xfer, 1);
-			ret = 0;
-		} else {
-			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
-				__func__, __LINE__);
-			_finish_off(xfer, S3C2410_RES_ERR, 0);
-		}
-	}
-
-	return ret;
-}
-
-static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
-	struct pl330_req *r, enum pl330_op_err err)
-{
-	unsigned long flags;
-	struct s3c_pl330_xfer *xfer;
-	struct pl330_xfer *xl = r->x;
-	enum s3c2410_dma_buffresult res;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	r->x = NULL;
-
-	s3c_pl330_submit(ch, r);
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	/* Map result to S3C DMA API */
-	if (err == PL330_ERR_NONE)
-		res = S3C2410_RES_OK;
-	else if (err == PL330_ERR_ABORT)
-		res = S3C2410_RES_ABORT;
-	else
-		res = S3C2410_RES_ERR;
-
-	/* If last request had some xfer */
-	if (xl) {
-		xfer = container_of(xl, struct s3c_pl330_xfer, px);
-		_finish_off(xfer, res, 0);
-	} else {
-		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
-			__func__, __LINE__);
-	}
-}
-
-static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
-{
-	struct pl330_req *r = token;
-	struct s3c_pl330_chan *ch = container_of(r,
-					struct s3c_pl330_chan, req[0]);
-	s3c_pl330_rq(ch, r, err);
-}
-
-static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
-{
-	struct pl330_req *r = token;
-	struct s3c_pl330_chan *ch = container_of(r,
-					struct s3c_pl330_chan, req[1]);
-	s3c_pl330_rq(ch, r, err);
-}
-
-/* Release an acquired channel */
-static void chan_release(struct s3c_pl330_chan *ch)
-{
-	struct s3c_pl330_dmac *dmac;
-
-	if (chan_free(ch))
-		return;
-
-	dmac = ch->dmac;
-	ch->dmac = NULL;
-	dmac->busy_chan--;
-}
-
-int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
-{
-	struct s3c_pl330_xfer *xfer;
-	enum pl330_chan_op pl330op;
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int idx, ret;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto ctrl_exit;
-	}
-
-	switch (op) {
-	case S3C2410_DMAOP_START:
-		/* Make sure both reqs are enqueued */
-		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-		s3c_pl330_submit(ch, &ch->req[idx]);
-		s3c_pl330_submit(ch, &ch->req[1 - idx]);
-		pl330op = PL330_OP_START;
-		break;
-
-	case S3C2410_DMAOP_STOP:
-		pl330op = PL330_OP_ABORT;
-		break;
-
-	case S3C2410_DMAOP_FLUSH:
-		pl330op = PL330_OP_FLUSH;
-		break;
-
-	case S3C2410_DMAOP_PAUSE:
-	case S3C2410_DMAOP_RESUME:
-	case S3C2410_DMAOP_TIMEOUT:
-	case S3C2410_DMAOP_STARTED:
-		spin_unlock_irqrestore(&res_lock, flags);
-		return 0;
-
-	default:
-		spin_unlock_irqrestore(&res_lock, flags);
-		return -EINVAL;
-	}
-
-	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
-
-	if (pl330op == PL330_OP_START) {
-		spin_unlock_irqrestore(&res_lock, flags);
-		return ret;
-	}
-
-	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-
-	/* Abort the current xfer */
-	if (ch->req[idx].x) {
-		xfer = container_of(ch->req[idx].x,
-				struct s3c_pl330_xfer, px);
-
-		/* Drop xfer during FLUSH */
-		if (pl330op == PL330_OP_FLUSH)
-			del_from_queue(xfer);
-
-		ch->req[idx].x = NULL;
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT,
-				pl330op == PL330_OP_FLUSH ? 1 : 0);
-		spin_lock_irqsave(&res_lock, flags);
-	}
-
-	/* Flush the whole queue */
-	if (pl330op == PL330_OP_FLUSH) {
-
-		if (ch->req[1 - idx].x) {
-			xfer = container_of(ch->req[1 - idx].x,
-					struct s3c_pl330_xfer, px);
-
-			del_from_queue(xfer);
-
-			ch->req[1 - idx].x = NULL;
-
-			spin_unlock_irqrestore(&res_lock, flags);
-			_finish_off(xfer, S3C2410_RES_ABORT, 1);
-			spin_lock_irqsave(&res_lock, flags);
-		}
-
-		/* Finish off the remaining in the queue */
-		xfer = ch->xfer_head;
-		while (xfer) {
-
-			del_from_queue(xfer);
-
-			spin_unlock_irqrestore(&res_lock, flags);
-			_finish_off(xfer, S3C2410_RES_ABORT, 1);
-			spin_lock_irqsave(&res_lock, flags);
-
-			xfer = ch->xfer_head;
-		}
-	}
-
-ctrl_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_ctrl);
-
-int s3c2410_dma_enqueue(enum dma_ch id, void *token,
-			dma_addr_t addr, int size)
-{
-	struct s3c_pl330_chan *ch;
-	struct s3c_pl330_xfer *xfer;
-	unsigned long flags;
-	int idx, ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	/* Error if invalid or free channel */
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto enq_exit;
-	}
-
-	/* Error if size is unaligned */
-	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
-		ret = -EINVAL;
-		goto enq_exit;
-	}
-
-	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
-	if (!xfer) {
-		ret = -ENOMEM;
-		goto enq_exit;
-	}
-
-	xfer->token = token;
-	xfer->chan = ch;
-	xfer->px.bytes = size;
-	xfer->px.next = NULL; /* Single request */
-
-	/* For S3C DMA API, direction is always fixed for all xfers */
-	if (ch->req[0].rqtype == MEMTODEV) {
-		xfer->px.src_addr = addr;
-		xfer->px.dst_addr = ch->sdaddr;
-	} else {
-		xfer->px.src_addr = ch->sdaddr;
-		xfer->px.dst_addr = addr;
-	}
-
-	add_to_queue(ch, xfer, 0);
-
-	/* Try submitting on either request */
-	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-
-	if (!ch->req[idx].x)
-		s3c_pl330_submit(ch, &ch->req[idx]);
-	else
-		s3c_pl330_submit(ch, &ch->req[1 - idx]);
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	if (ch->options & S3C2410_DMAF_AUTOSTART)
-		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
-
-	return 0;
-
-enq_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_enqueue);
-
-int s3c2410_dma_request(enum dma_ch id,
-			struct s3c2410_dma_client *client,
-			void *dev)
-{
-	struct s3c_pl330_dmac *dmac;
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = chan_acquire(id);
-	if (!ch) {
-		ret = -EBUSY;
-		goto req_exit;
-	}
-
-	dmac = ch->dmac;
-
-	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
-	if (!ch->pl330_chan_id) {
-		chan_release(ch);
-		ret = -EBUSY;
-		goto req_exit;
-	}
-
-	ch->client = client;
-	ch->options = 0; /* Clear any option */
-	ch->callback_fn = NULL; /* Clear any callback */
-	ch->lrq = NULL;
-
-	ch->rqcfg.brst_size = 2; /* Default word size */
-	ch->rqcfg.swap = SWAP_NO;
-	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
-	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
-	ch->rqcfg.privileged = 0;
-	ch->rqcfg.insnaccess = 0;
-
-	/* Set invalid direction */
-	ch->req[0].rqtype = DEVTODEV;
-	ch->req[1].rqtype = ch->req[0].rqtype;
-
-	ch->req[0].cfg = &ch->rqcfg;
-	ch->req[1].cfg = ch->req[0].cfg;
-
-	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
-	ch->req[1].peri = ch->req[0].peri;
-
-	ch->req[0].token = &ch->req[0];
-	ch->req[0].xfer_cb = s3c_pl330_rq0;
-	ch->req[1].token = &ch->req[1];
-	ch->req[1].xfer_cb = s3c_pl330_rq1;
-
-	ch->req[0].x = NULL;
-	ch->req[1].x = NULL;
-
-	/* Reset xfer list */
-	INIT_LIST_HEAD(&ch->xfer_list);
-	ch->xfer_head = NULL;
-
-req_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_request);
-
-int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
-{
-	struct s3c_pl330_chan *ch;
-	struct s3c_pl330_xfer *xfer;
-	unsigned long flags;
-	int ret = 0;
-	unsigned idx;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch))
-		goto free_exit;
-
-	/* Refuse if someone else wanted to free the channel */
-	if (ch->client != client) {
-		ret = -EBUSY;
-		goto free_exit;
-	}
-
-	/* Stop any active xfer, Flushe the queue and do callbacks */
-	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
-
-	/* Abort the submitted requests */
-	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
-
-	if (ch->req[idx].x) {
-		xfer = container_of(ch->req[idx].x,
-				struct s3c_pl330_xfer, px);
-
-		ch->req[idx].x = NULL;
-		del_from_queue(xfer);
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT, 1);
-		spin_lock_irqsave(&res_lock, flags);
-	}
-
-	if (ch->req[1 - idx].x) {
-		xfer = container_of(ch->req[1 - idx].x,
-				struct s3c_pl330_xfer, px);
-
-		ch->req[1 - idx].x = NULL;
-		del_from_queue(xfer);
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT, 1);
-		spin_lock_irqsave(&res_lock, flags);
-	}
-
-	/* Pluck and Abort the queued requests in order */
-	do {
-		xfer = get_from_queue(ch, 1);
-
-		spin_unlock_irqrestore(&res_lock, flags);
-		_finish_off(xfer, S3C2410_RES_ABORT, 1);
-		spin_lock_irqsave(&res_lock, flags);
-	} while (xfer);
-
-	ch->client = NULL;
-
-	pl330_release_channel(ch->pl330_chan_id);
-
-	ch->pl330_chan_id = NULL;
-
-	chan_release(ch);
-
-free_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_free);
-
-int s3c2410_dma_config(enum dma_ch id, int xferunit)
-{
-	struct s3c_pl330_chan *ch;
-	struct pl330_info *pi;
-	unsigned long flags;
-	int i, dbwidth, ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto cfg_exit;
-	}
-
-	pi = ch->dmac->pi;
-	dbwidth = pi->pcfg.data_bus_width / 8;
-
-	/* Max size of xfer can be pcfg.data_bus_width */
-	if (xferunit > dbwidth) {
-		ret = -EINVAL;
-		goto cfg_exit;
-	}
-
-	i = 0;
-	while (xferunit != (1 << i))
-		i++;
-
-	/* If valid value */
-	if (xferunit == (1 << i))
-		ch->rqcfg.brst_size = i;
-	else
-		ret = -EINVAL;
-
-cfg_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_config);
-
-/* Options that are supported by this driver */
-#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
-
-int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
-{
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
-		ret = -EINVAL;
-	else
-		ch->options = options;
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(s3c2410_dma_setflags);
-
-int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
-{
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch))
-		ret = -EINVAL;
-	else
-		ch->callback_fn = rtn;
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
-
-int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
-			  unsigned long address)
-{
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	ch = id_to_chan(id);
-
-	if (!ch || chan_free(ch)) {
-		ret = -EINVAL;
-		goto devcfg_exit;
-	}
-
-	switch (source) {
-	case S3C2410_DMASRC_HW: /* P->M */
-		ch->req[0].rqtype = DEVTOMEM;
-		ch->req[1].rqtype = DEVTOMEM;
-		ch->rqcfg.src_inc = 0;
-		ch->rqcfg.dst_inc = 1;
-		break;
-	case S3C2410_DMASRC_MEM: /* M->P */
-		ch->req[0].rqtype = MEMTODEV;
-		ch->req[1].rqtype = MEMTODEV;
-		ch->rqcfg.src_inc = 1;
-		ch->rqcfg.dst_inc = 0;
-		break;
-	default:
-		ret = -EINVAL;
-		goto devcfg_exit;
-	}
-
-	ch->sdaddr = address;
-
-devcfg_exit:
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(s3c2410_dma_devconfig);
-
-int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
-{
-	struct s3c_pl330_chan *ch = id_to_chan(id);
-	struct pl330_chanstatus status;
-	int ret;
-
-	if (!ch || chan_free(ch))
-		return -EINVAL;
-
-	ret = pl330_chan_status(ch->pl330_chan_id, &status);
-	if (ret < 0)
-		return ret;
-
-	*src = status.src_addr;
-	*dst = status.dst_addr;
-
-	return 0;
-}
-EXPORT_SYMBOL(s3c2410_dma_getposition);
-
-static irqreturn_t pl330_irq_handler(int irq, void *data)
-{
-	if (pl330_update(data))
-		return IRQ_HANDLED;
-	else
-		return IRQ_NONE;
-}
-
-static int pl330_probe(struct platform_device *pdev)
-{
-	struct s3c_pl330_dmac *s3c_pl330_dmac;
-	struct s3c_pl330_platdata *pl330pd;
-	struct pl330_info *pl330_info;
-	struct resource *res;
-	int i, ret, irq;
-
-	pl330pd = pdev->dev.platform_data;
-
-	/* Can't do without the list of _32_ peripherals */
-	if (!pl330pd || !pl330pd->peri) {
-		dev_err(&pdev->dev, "platform data missing!\n");
-		return -ENODEV;
-	}
-
-	pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
-	if (!pl330_info)
-		return -ENOMEM;
-
-	pl330_info->pl330_data = NULL;
-	pl330_info->dev = &pdev->dev;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -ENODEV;
-		goto probe_err1;
-	}
-
-	request_mem_region(res->start, resource_size(res), pdev->name);
-
-	pl330_info->base = ioremap(res->start, resource_size(res));
-	if (!pl330_info->base) {
-		ret = -ENXIO;
-		goto probe_err2;
-	}
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		ret = irq;
-		goto probe_err3;
-	}
-
-	ret = request_irq(irq, pl330_irq_handler, 0,
-			dev_name(&pdev->dev), pl330_info);
-	if (ret)
-		goto probe_err4;
-
-	/* Allocate a new DMAC */
-	s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
-	if (!s3c_pl330_dmac) {
-		ret = -ENOMEM;
-		goto probe_err5;
-	}
-
-	/* Get operation clock and enable it */
-	s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
-	if (IS_ERR(s3c_pl330_dmac->clk)) {
-		dev_err(&pdev->dev, "Cannot get operation clock.\n");
-		ret = -EINVAL;
-		goto probe_err6;
-	}
-	clk_enable(s3c_pl330_dmac->clk);
-
-	ret = pl330_add(pl330_info);
-	if (ret)
-		goto probe_err7;
-
-	/* Hook the info */
-	s3c_pl330_dmac->pi = pl330_info;
-
-	/* No busy channels */
-	s3c_pl330_dmac->busy_chan = 0;
-
-	s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
-				sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
-
-	if (!s3c_pl330_dmac->kmcache) {
-		ret = -ENOMEM;
-		goto probe_err8;
-	}
-
-	/* Get the list of peripherals */
-	s3c_pl330_dmac->peri = pl330pd->peri;
-
-	/* Attach to the list of DMACs */
-	list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
-
-	/* Create a channel for each peripheral in the DMAC
-	 * that is, if it doesn't already exist
-	 */
-	for (i = 0; i < PL330_MAX_PERI; i++)
-		if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
-			chan_add(s3c_pl330_dmac->peri[i]);
-
-	printk(KERN_INFO
-		"Loaded driver for PL330 DMAC-%d %s\n",	pdev->id, pdev->name);
-	printk(KERN_INFO
-		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
-		pl330_info->pcfg.data_buf_dep,
-		pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
-		pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
-
-	return 0;
-
-probe_err8:
-	pl330_del(pl330_info);
-probe_err7:
-	clk_disable(s3c_pl330_dmac->clk);
-	clk_put(s3c_pl330_dmac->clk);
-probe_err6:
-	kfree(s3c_pl330_dmac);
-probe_err5:
-	free_irq(irq, pl330_info);
-probe_err4:
-probe_err3:
-	iounmap(pl330_info->base);
-probe_err2:
-	release_mem_region(res->start, resource_size(res));
-probe_err1:
-	kfree(pl330_info);
-
-	return ret;
-}
-
-static int pl330_remove(struct platform_device *pdev)
-{
-	struct s3c_pl330_dmac *dmac, *d;
-	struct s3c_pl330_chan *ch;
-	unsigned long flags;
-	int del, found;
-
-	if (!pdev->dev.platform_data)
-		return -EINVAL;
-
-	spin_lock_irqsave(&res_lock, flags);
-
-	found = 0;
-	list_for_each_entry(d, &dmac_list, node)
-		if (d->pi->dev == &pdev->dev) {
-			found = 1;
-			break;
-		}
-
-	if (!found) {
-		spin_unlock_irqrestore(&res_lock, flags);
-		return 0;
-	}
-
-	dmac = d;
-
-	/* Remove all Channels that are managed only by this DMAC */
-	list_for_each_entry(ch, &chan_list, node) {
-
-		/* Only channels that are handled by this DMAC */
-		if (iface_of_dmac(dmac, ch->id))
-			del = 1;
-		else
-			continue;
-
-		/* Don't remove if some other DMAC has it too */
-		list_for_each_entry(d, &dmac_list, node)
-			if (d != dmac && iface_of_dmac(d, ch->id)) {
-				del = 0;
-				break;
-			}
-
-		if (del) {
-			spin_unlock_irqrestore(&res_lock, flags);
-			s3c2410_dma_free(ch->id, ch->client);
-			spin_lock_irqsave(&res_lock, flags);
-			list_del(&ch->node);
-			kfree(ch);
-		}
-	}
-
-	/* Disable operation clock */
-	clk_disable(dmac->clk);
-	clk_put(dmac->clk);
-
-	/* Remove the DMAC */
-	list_del(&dmac->node);
-	kfree(dmac);
-
-	spin_unlock_irqrestore(&res_lock, flags);
-
-	return 0;
-}
-
-static struct platform_driver pl330_driver = {
-	.driver		= {
-		.owner	= THIS_MODULE,
-		.name	= "s3c-pl330",
-	},
-	.probe		= pl330_probe,
-	.remove		= pl330_remove,
-};
-
-static int __init pl330_init(void)
-{
-	return platform_driver_register(&pl330_driver);
-}
-module_init(pl330_init);
-
-static void __exit pl330_exit(void)
-{
-	platform_driver_unregister(&pl330_driver);
-	return;
-}
-module_exit(pl330_exit);
-
-MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
-MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327..9a023e6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -199,6 +199,14 @@  config PL330_DMA
 	  You need to provide platform specific settings via
 	  platform_data for a dma-pl330 device.
 
+config S3C_PL330_DMA
+	bool "S3C DMA API Driver for PL330 DMAC"
+	depends on PLAT_SAMSUNG
+	select DMA_ENGINE
+	select PL330
+	help
+	  S3C DMA API Driver for PL330 DMAC.
+
 config PCH_DMA
 	tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
 	depends on PCI && X86
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095a..6e81b5d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@  obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o
diff --git a/drivers/dma/s3c-pl330.c b/drivers/dma/s3c-pl330.c
new file mode 100644
index 0000000..f85638c
--- /dev/null
+++ b/drivers/dma/s3c-pl330.c
@@ -0,0 +1,1244 @@ 
+/* linux/drivers/dma/s3c-pl330.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/hardware/pl330.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+/**
+ * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
+ * @busy_chan: Number of channels currently busy.
+ * @peri: List of IDs of peripherals this DMAC can work with.
+ * @node: To attach to the global list of DMACs.
+ * @pi: PL330 configuration info for the DMAC.
+ * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
+ * @clk: Pointer of DMAC operation clock.
+ */
+struct s3c_pl330_dmac {
+	unsigned		busy_chan;
+	enum dma_ch		*peri;
+	struct list_head	node;
+	struct pl330_info	*pi;
+	struct kmem_cache	*kmcache;
+	struct clk		*clk;
+};
+
+/**
+ * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
+ * @token: Xfer ID provided by the client.
+ * @node: To attach to the list of xfers on a channel.
+ * @px: Xfer for PL330 core.
+ * @chan: Owner channel of this xfer.
+ */
+struct s3c_pl330_xfer {
+	void			*token;
+	struct list_head	node;
+	struct pl330_xfer	px;
+	struct s3c_pl330_chan	*chan;
+};
+
+/**
+ * struct s3c_pl330_chan - Logical channel to communicate with
+ *	a Physical peripheral.
+ * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
+ *	NULL if the channel is available to be acquired.
+ * @id: ID of the peripheral that this channel can communicate with.
+ * @options: Options specified by the client.
+ * @sdaddr: Address provided via s3c2410_dma_devconfig.
+ * @node: To attach to the global list of channels.
+ * @lrq: Pointer to the last submitted pl330_req to PL330 core.
+ * @xfer_list: To manage list of xfers enqueued.
+ * @req: Two requests to communicate with the PL330 engine.
+ * @callback_fn: Callback function to the client.
+ * @rqcfg: Channel configuration for the xfers.
+ * @xfer_head: Pointer to the xfer to be next executed.
+ * @dmac: Pointer to the DMAC that manages this channel, NULL if the
+ *	channel is available to be acquired.
+ * @client: Client of this channel. NULL if the
+ *	channel is available to be acquired.
+ */
+struct s3c_pl330_chan {
+	void				*pl330_chan_id;
+	enum dma_ch			id;
+	unsigned int			options;
+	unsigned long			sdaddr;
+	struct list_head		node;
+	struct pl330_req		*lrq;
+	struct list_head		xfer_list;
+	struct pl330_req		req[2];
+	s3c2410_dma_cbfn_t		callback_fn;
+	struct pl330_reqcfg		rqcfg;
+	struct s3c_pl330_xfer		*xfer_head;
+	struct s3c_pl330_dmac		*dmac;
+	struct s3c2410_dma_client	*client;
+};
+
+/* All DMACs in the platform */
+static LIST_HEAD(dmac_list);
+
+/* All channels to peripherals in the platform */
+static LIST_HEAD(chan_list);
+
+/*
+ * Since we add resources(DMACs and Channels) to the global pool,
+ * we need to guard access to the resources using a global lock
+ */
+static DEFINE_SPINLOCK(res_lock);
+
+/* Returns the channel with ID 'id' in the chan_list */
+static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
+{
+	struct s3c_pl330_chan *ch;
+
+	list_for_each_entry(ch, &chan_list, node)
+		if (ch->id == id)
+			return ch;
+
+	return NULL;
+}
+
+/* Allocate a new channel with ID 'id' and add to chan_list */
+static void chan_add(const enum dma_ch id)
+{
+	struct s3c_pl330_chan *ch = id_to_chan(id);
+
+	/* Return if the channel already exists */
+	if (ch)
+		return;
+
+	ch = kmalloc(sizeof(*ch), GFP_KERNEL);
+	/* Return silently to work with other channels */
+	if (!ch)
+		return;
+
+	ch->id = id;
+	ch->dmac = NULL;
+
+	list_add_tail(&ch->node, &chan_list);
+}
+
+/* If the channel is not yet acquired by any client */
+static bool chan_free(struct s3c_pl330_chan *ch)
+{
+	if (!ch)
+		return false;
+
+	/* Channel points to some DMAC only when it's acquired */
+	return ch->dmac ? false : true;
+}
+
+/*
+ * Returns 0 if peripheral i/f is invalid or not present on the dmac.
+ * Index + 1, otherwise.
+ */
+static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
+{
+	enum dma_ch *id = dmac->peri;
+	int i;
+
+	/* Discount invalid markers */
+	if (ch_id == DMACH_MAX)
+		return 0;
+
+	for (i = 0; i < PL330_MAX_PERI; i++)
+		if (id[i] == ch_id)
+			return i + 1;
+
+	return 0;
+}
+
+/* If all channel threads of the DMAC are busy */
+static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
+{
+	struct pl330_info *pi = dmac->pi;
+
+	return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
+}
+
+/*
+ * Returns the number of free channels that
+ * can be handled by this dmac only.
+ */
+static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
+{
+	enum dma_ch *id = dmac->peri;
+	struct s3c_pl330_dmac *d;
+	struct s3c_pl330_chan *ch;
+	unsigned found, count = 0;
+	enum dma_ch p;
+	int i;
+
+	for (i = 0; i < PL330_MAX_PERI; i++) {
+		p = id[i];
+		ch = id_to_chan(p);
+
+		if (p == DMACH_MAX || !chan_free(ch))
+			continue;
+
+		found = 0;
+		list_for_each_entry(d, &dmac_list, node) {
+			if (d != dmac && iface_of_dmac(d, ch->id)) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			count++;
+	}
+
+	return count;
+}
+
+/*
+ * Measure of suitability of 'dmac' handling 'ch'
+ *
+ * 0 indicates 'dmac' can not handle 'ch' either
+ * because it is not supported by the hardware or
+ * because all dmac channels are currently busy.
+ *
+ * >0 value indicates 'dmac' has the capability.
+ * The bigger the value the more suitable the dmac.
+ */
+#define MAX_SUIT	UINT_MAX
+#define MIN_SUIT	0
+
+static unsigned suitablility(struct s3c_pl330_dmac *dmac,
+		struct s3c_pl330_chan *ch)
+{
+	struct pl330_info *pi = dmac->pi;
+	enum dma_ch *id = dmac->peri;
+	struct s3c_pl330_dmac *d;
+	unsigned s;
+	int i;
+
+	s = MIN_SUIT;
+	/* If all the DMAC channel threads are busy */
+	if (dmac_busy(dmac))
+		return s;
+
+	for (i = 0; i < PL330_MAX_PERI; i++)
+		if (id[i] == ch->id)
+			break;
+
+	/* If the 'dmac' can't talk to 'ch' */
+	if (i == PL330_MAX_PERI)
+		return s;
+
+	s = MAX_SUIT;
+	list_for_each_entry(d, &dmac_list, node) {
+		/*
+		 * If some other dmac can talk to this
+		 * peri and has some channel free.
+		 */
+		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
+			s = 0;
+			break;
+		}
+	}
+	if (s)
+		return s;
+
+	s = 100;
+
+	/* Good if free chans are more, bad otherwise */
+	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
+
+	return s;
+}
+
+/* More than one DMAC may have capability to transfer data with the
+ * peripheral. This function assigns most suitable DMAC to manage the
+ * channel and hence communicate with the peripheral.
+ */
+static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
+{
+	struct s3c_pl330_dmac *d, *dmac = NULL;
+	unsigned sn, sl = MIN_SUIT;
+
+	list_for_each_entry(d, &dmac_list, node) {
+		sn = suitablility(d, ch);
+
+		if (sn == MAX_SUIT)
+			return d;
+
+		if (sn > sl)
+			dmac = d;
+	}
+
+	return dmac;
+}
+
+/* Acquire the channel for peripheral 'id' */
+static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
+{
+	struct s3c_pl330_chan *ch = id_to_chan(id);
+	struct s3c_pl330_dmac *dmac;
+
+	/* If the channel doesn't exist or is already acquired */
+	if (!ch || !chan_free(ch)) {
+		ch = NULL;
+		goto acq_exit;
+	}
+
+	dmac = map_chan_to_dmac(ch);
+	/* If couldn't map */
+	if (!dmac) {
+		ch = NULL;
+		goto acq_exit;
+	}
+
+	dmac->busy_chan++;
+	ch->dmac = dmac;
+
+acq_exit:
+	return ch;
+}
+
+/* Delete xfer from the queue */
+static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
+{
+	struct s3c_pl330_xfer *t;
+	struct s3c_pl330_chan *ch;
+	int found;
+
+	if (!xfer)
+		return;
+
+	ch = xfer->chan;
+
+	/* Make sure xfer is in the queue */
+	found = 0;
+	list_for_each_entry(t, &ch->xfer_list, node)
+		if (t == xfer) {
+			found = 1;
+			break;
+		}
+
+	if (!found)
+		return;
+
+	/* If xfer is last entry in the queue */
+	if (xfer->node.next == &ch->xfer_list)
+		t = list_entry(ch->xfer_list.next,
+				struct s3c_pl330_xfer, node);
+	else
+		t = list_entry(xfer->node.next,
+				struct s3c_pl330_xfer, node);
+
+	/* If there was only one node left */
+	if (t == xfer)
+		ch->xfer_head = NULL;
+	else if (ch->xfer_head == xfer)
+		ch->xfer_head = t;
+
+	list_del(&xfer->node);
+}
+
+/* Provides pointer to the next xfer in the queue.
+ * If CIRCULAR option is set, the list is left intact,
+ * otherwise the xfer is removed from the list.
+ * Forced delete 'pluck' can be set to override the CIRCULAR option.
+ */
+static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
+		int pluck)
+{
+	struct s3c_pl330_xfer *xfer = ch->xfer_head;
+
+	if (!xfer)
+		return NULL;
+
+	/* If xfer is last entry in the queue */
+	if (xfer->node.next == &ch->xfer_list)
+		ch->xfer_head = list_entry(ch->xfer_list.next,
+					struct s3c_pl330_xfer, node);
+	else
+		ch->xfer_head = list_entry(xfer->node.next,
+					struct s3c_pl330_xfer, node);
+
+	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
+		del_from_queue(xfer);
+
+	return xfer;
+}
+
+static inline void add_to_queue(struct s3c_pl330_chan *ch,
+		struct s3c_pl330_xfer *xfer, int front)
+{
+	struct pl330_xfer *xt;
+
+	/* If queue empty */
+	if (ch->xfer_head == NULL)
+		ch->xfer_head = xfer;
+
+	xt = &ch->xfer_head->px;
+	/* If the head already submitted (CIRCULAR head) */
+	if (ch->options & S3C2410_DMAF_CIRCULAR &&
+		(xt == ch->req[0].x || xt == ch->req[1].x))
+		ch->xfer_head = xfer;
+
+	/* If this is a resubmission, it should go at the head */
+	if (front) {
+		ch->xfer_head = xfer;
+		list_add(&xfer->node, &ch->xfer_list);
+	} else {
+		list_add_tail(&xfer->node, &ch->xfer_list);
+	}
+}
+
+static inline void _finish_off(struct s3c_pl330_xfer *xfer,
+		enum s3c2410_dma_buffresult res, int ffree)
+{
+	struct s3c_pl330_chan *ch;
+
+	if (!xfer)
+		return;
+
+	ch = xfer->chan;
+
+	/* Do callback */
+	if (ch->callback_fn)
+		ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
+
+	/* Force Free or if buffer is not needed anymore */
+	if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
+		kmem_cache_free(ch->dmac->kmcache, xfer);
+}
+
+static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
+		struct pl330_req *r)
+{
+	struct s3c_pl330_xfer *xfer;
+	int ret = 0;
+
+	/* If already submitted */
+	if (r->x)
+		return 0;
+
+	xfer = get_from_queue(ch, 0);
+	if (xfer) {
+		r->x = &xfer->px;
+
+		/* Use max bandwidth for M<->M xfers */
+		if (r->rqtype == MEMTOMEM) {
+			struct pl330_info *pi = xfer->chan->dmac->pi;
+			int burst = 1 << ch->rqcfg.brst_size;
+			u32 bytes = r->x->bytes;
+			int bl;
+
+			bl = pi->pcfg.data_bus_width / 8;
+			bl *= pi->pcfg.data_buf_dep;
+			bl /= burst;
+
+			/* src/dst_burst_len can't be more than 16 */
+			if (bl > 16)
+				bl = 16;
+
+			while (bl > 1) {
+				if (!(bytes % (bl * burst)))
+					break;
+				bl--;
+			}
+
+			ch->rqcfg.brst_len = bl;
+		} else {
+			ch->rqcfg.brst_len = 1;
+		}
+
+		ret = pl330_submit_req(ch->pl330_chan_id, r);
+
+		/* If submission was successful */
+		if (!ret) {
+			ch->lrq = r; /* latest submitted req */
+			return 0;
+		}
+
+		r->x = NULL;
+
+		/* If both of the PL330 ping-pong buffers filled */
+		if (ret == -EAGAIN) {
+			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
+				__func__, __LINE__);
+			/* Queue back again */
+			add_to_queue(ch, xfer, 1);
+			ret = 0;
+		} else {
+			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
+				__func__, __LINE__);
+			_finish_off(xfer, S3C2410_RES_ERR, 0);
+		}
+	}
+
+	return ret;
+}
+
+static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
+	struct pl330_req *r, enum pl330_op_err err)
+{
+	unsigned long flags;
+	struct s3c_pl330_xfer *xfer;
+	struct pl330_xfer *xl = r->x;
+	enum s3c2410_dma_buffresult res;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	r->x = NULL;
+
+	s3c_pl330_submit(ch, r);
+
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	/* Map result to S3C DMA API */
+	if (err == PL330_ERR_NONE)
+		res = S3C2410_RES_OK;
+	else if (err == PL330_ERR_ABORT)
+		res = S3C2410_RES_ABORT;
+	else
+		res = S3C2410_RES_ERR;
+
+	/* If last request had some xfer */
+	if (xl) {
+		xfer = container_of(xl, struct s3c_pl330_xfer, px);
+		_finish_off(xfer, res, 0);
+	} else {
+		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
+			__func__, __LINE__);
+	}
+}
+
+static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
+{
+	struct pl330_req *r = token;
+	struct s3c_pl330_chan *ch = container_of(r,
+					struct s3c_pl330_chan, req[0]);
+	s3c_pl330_rq(ch, r, err);
+}
+
+static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
+{
+	struct pl330_req *r = token;
+	struct s3c_pl330_chan *ch = container_of(r,
+					struct s3c_pl330_chan, req[1]);
+	s3c_pl330_rq(ch, r, err);
+}
+
+/* Release an acquired channel */
+static void chan_release(struct s3c_pl330_chan *ch)
+{
+	struct s3c_pl330_dmac *dmac;
+
+	if (chan_free(ch))
+		return;
+
+	dmac = ch->dmac;
+	ch->dmac = NULL;
+	dmac->busy_chan--;
+}
+
+int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
+{
+	struct s3c_pl330_xfer *xfer;
+	enum pl330_chan_op pl330op;
+	struct s3c_pl330_chan *ch;
+	unsigned long flags;
+	int idx, ret;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = id_to_chan(id);
+
+	if (!ch || chan_free(ch)) {
+		ret = -EINVAL;
+		goto ctrl_exit;
+	}
+
+	switch (op) {
+	case S3C2410_DMAOP_START:
+		/* Make sure both reqs are enqueued */
+		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+		s3c_pl330_submit(ch, &ch->req[idx]);
+		s3c_pl330_submit(ch, &ch->req[1 - idx]);
+		pl330op = PL330_OP_START;
+		break;
+
+	case S3C2410_DMAOP_STOP:
+		pl330op = PL330_OP_ABORT;
+		break;
+
+	case S3C2410_DMAOP_FLUSH:
+		pl330op = PL330_OP_FLUSH;
+		break;
+
+	case S3C2410_DMAOP_PAUSE:
+	case S3C2410_DMAOP_RESUME:
+	case S3C2410_DMAOP_TIMEOUT:
+	case S3C2410_DMAOP_STARTED:
+		spin_unlock_irqrestore(&res_lock, flags);
+		return 0;
+
+	default:
+		spin_unlock_irqrestore(&res_lock, flags);
+		return -EINVAL;
+	}
+
+	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
+
+	if (pl330op == PL330_OP_START) {
+		spin_unlock_irqrestore(&res_lock, flags);
+		return ret;
+	}
+
+	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+
+	/* Abort the current xfer */
+	if (ch->req[idx].x) {
+		xfer = container_of(ch->req[idx].x,
+				struct s3c_pl330_xfer, px);
+
+		/* Drop xfer during FLUSH */
+		if (pl330op == PL330_OP_FLUSH)
+			del_from_queue(xfer);
+
+		ch->req[idx].x = NULL;
+
+		spin_unlock_irqrestore(&res_lock, flags);
+		_finish_off(xfer, S3C2410_RES_ABORT,
+				pl330op == PL330_OP_FLUSH ? 1 : 0);
+		spin_lock_irqsave(&res_lock, flags);
+	}
+
+	/* Flush the whole queue */
+	if (pl330op == PL330_OP_FLUSH) {
+
+		if (ch->req[1 - idx].x) {
+			xfer = container_of(ch->req[1 - idx].x,
+					struct s3c_pl330_xfer, px);
+
+			del_from_queue(xfer);
+
+			ch->req[1 - idx].x = NULL;
+
+			spin_unlock_irqrestore(&res_lock, flags);
+			_finish_off(xfer, S3C2410_RES_ABORT, 1);
+			spin_lock_irqsave(&res_lock, flags);
+		}
+
+		/* Finish off the remaining in the queue */
+		xfer = ch->xfer_head;
+		while (xfer) {
+
+			del_from_queue(xfer);
+
+			spin_unlock_irqrestore(&res_lock, flags);
+			_finish_off(xfer, S3C2410_RES_ABORT, 1);
+			spin_lock_irqsave(&res_lock, flags);
+
+			xfer = ch->xfer_head;
+		}
+	}
+
+ctrl_exit:
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_ctrl);
+
+int s3c2410_dma_enqueue(enum dma_ch id, void *token,
+			dma_addr_t addr, int size)
+{
+	struct s3c_pl330_chan *ch;
+	struct s3c_pl330_xfer *xfer;
+	unsigned long flags;
+	int idx, ret = 0;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = id_to_chan(id);
+
+	/* Error if invalid or free channel */
+	if (!ch || chan_free(ch)) {
+		ret = -EINVAL;
+		goto enq_exit;
+	}
+
+	/* Error if size is unaligned */
+	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
+		ret = -EINVAL;
+		goto enq_exit;
+	}
+
+	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
+	if (!xfer) {
+		ret = -ENOMEM;
+		goto enq_exit;
+	}
+
+	xfer->token = token;
+	xfer->chan = ch;
+	xfer->px.bytes = size;
+	xfer->px.next = NULL; /* Single request */
+
+	/* For S3C DMA API, direction is always fixed for all xfers */
+	if (ch->req[0].rqtype == MEMTODEV) {
+		xfer->px.src_addr = addr;
+		xfer->px.dst_addr = ch->sdaddr;
+	} else {
+		xfer->px.src_addr = ch->sdaddr;
+		xfer->px.dst_addr = addr;
+	}
+
+	add_to_queue(ch, xfer, 0);
+
+	/* Try submitting on either request */
+	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+
+	if (!ch->req[idx].x)
+		s3c_pl330_submit(ch, &ch->req[idx]);
+	else
+		s3c_pl330_submit(ch, &ch->req[1 - idx]);
+
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	if (ch->options & S3C2410_DMAF_AUTOSTART)
+		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
+
+	return 0;
+
+enq_exit:
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_enqueue);
+
+int s3c2410_dma_request(enum dma_ch id,
+			struct s3c2410_dma_client *client,
+			void *dev)
+{
+	struct s3c_pl330_dmac *dmac;
+	struct s3c_pl330_chan *ch;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = chan_acquire(id);
+	if (!ch) {
+		ret = -EBUSY;
+		goto req_exit;
+	}
+
+	dmac = ch->dmac;
+
+	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
+	if (!ch->pl330_chan_id) {
+		chan_release(ch);
+		ret = -EBUSY;
+		goto req_exit;
+	}
+
+	ch->client = client;
+	ch->options = 0; /* Clear any option */
+	ch->callback_fn = NULL; /* Clear any callback */
+	ch->lrq = NULL;
+
+	ch->rqcfg.brst_size = 2; /* Default word size */
+	ch->rqcfg.swap = SWAP_NO;
+	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
+	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
+	ch->rqcfg.privileged = 0;
+	ch->rqcfg.insnaccess = 0;
+
+	/* Set invalid direction */
+	ch->req[0].rqtype = DEVTODEV;
+	ch->req[1].rqtype = ch->req[0].rqtype;
+
+	ch->req[0].cfg = &ch->rqcfg;
+	ch->req[1].cfg = ch->req[0].cfg;
+
+	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
+	ch->req[1].peri = ch->req[0].peri;
+
+	ch->req[0].token = &ch->req[0];
+	ch->req[0].xfer_cb = s3c_pl330_rq0;
+	ch->req[1].token = &ch->req[1];
+	ch->req[1].xfer_cb = s3c_pl330_rq1;
+
+	ch->req[0].x = NULL;
+	ch->req[1].x = NULL;
+
+	/* Reset xfer list */
+	INIT_LIST_HEAD(&ch->xfer_list);
+	ch->xfer_head = NULL;
+
+req_exit:
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_request);
+
+int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
+{
+	struct s3c_pl330_chan *ch;
+	struct s3c_pl330_xfer *xfer;
+	unsigned long flags;
+	int ret = 0;
+	unsigned idx;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = id_to_chan(id);
+
+	if (!ch || chan_free(ch))
+		goto free_exit;
+
+	/* Refuse if someone else wanted to free the channel */
+	if (ch->client != client) {
+		ret = -EBUSY;
+		goto free_exit;
+	}
+
+	/* Stop any active xfer, flush the queue and do callbacks */
+	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
+
+	/* Abort the submitted requests */
+	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+
+	if (ch->req[idx].x) {
+		xfer = container_of(ch->req[idx].x,
+				struct s3c_pl330_xfer, px);
+
+		ch->req[idx].x = NULL;
+		del_from_queue(xfer);
+
+		spin_unlock_irqrestore(&res_lock, flags);
+		_finish_off(xfer, S3C2410_RES_ABORT, 1);
+		spin_lock_irqsave(&res_lock, flags);
+	}
+
+	if (ch->req[1 - idx].x) {
+		xfer = container_of(ch->req[1 - idx].x,
+				struct s3c_pl330_xfer, px);
+
+		ch->req[1 - idx].x = NULL;
+		del_from_queue(xfer);
+
+		spin_unlock_irqrestore(&res_lock, flags);
+		_finish_off(xfer, S3C2410_RES_ABORT, 1);
+		spin_lock_irqsave(&res_lock, flags);
+	}
+
+	/* Pluck and Abort the queued requests in order */
+	do {
+		xfer = get_from_queue(ch, 1);
+
+		spin_unlock_irqrestore(&res_lock, flags);
+		_finish_off(xfer, S3C2410_RES_ABORT, 1);
+		spin_lock_irqsave(&res_lock, flags);
+	} while (xfer);
+
+	ch->client = NULL;
+
+	pl330_release_channel(ch->pl330_chan_id);
+
+	ch->pl330_chan_id = NULL;
+
+	chan_release(ch);
+
+free_exit:
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_free);
+
+int s3c2410_dma_config(enum dma_ch id, int xferunit)
+{
+	struct s3c_pl330_chan *ch;
+	struct pl330_info *pi;
+	unsigned long flags;
+	int i, dbwidth, ret = 0;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = id_to_chan(id);
+
+	if (!ch || chan_free(ch)) {
+		ret = -EINVAL;
+		goto cfg_exit;
+	}
+
+	pi = ch->dmac->pi;
+	dbwidth = pi->pcfg.data_bus_width / 8;
+
+	/* Max size of xfer can be pcfg.data_bus_width */
+	if (xferunit > dbwidth) {
+		ret = -EINVAL;
+		goto cfg_exit;
+	}
+
+	i = 0;
+	while (xferunit != (1 << i))
+		i++;
+
+	/* If valid value */
+	if (xferunit == (1 << i))
+		ch->rqcfg.brst_size = i;
+	else
+		ret = -EINVAL;
+
+cfg_exit:
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_config);
+
+/* Options that are supported by this driver */
+#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
+
+int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
+{
+	struct s3c_pl330_chan *ch;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = id_to_chan(id);
+
+	if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
+		ret = -EINVAL;
+	else
+		ch->options = options;
+
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(s3c2410_dma_setflags);
+
+int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
+{
+	struct s3c_pl330_chan *ch;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = id_to_chan(id);
+
+	if (!ch || chan_free(ch))
+		ret = -EINVAL;
+	else
+		ch->callback_fn = rtn;
+
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
+
+int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
+			  unsigned long address)
+{
+	struct s3c_pl330_chan *ch;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	ch = id_to_chan(id);
+
+	if (!ch || chan_free(ch)) {
+		ret = -EINVAL;
+		goto devcfg_exit;
+	}
+
+	switch (source) {
+	case S3C2410_DMASRC_HW: /* P->M */
+		ch->req[0].rqtype = DEVTOMEM;
+		ch->req[1].rqtype = DEVTOMEM;
+		ch->rqcfg.src_inc = 0;
+		ch->rqcfg.dst_inc = 1;
+		break;
+	case S3C2410_DMASRC_MEM: /* M->P */
+		ch->req[0].rqtype = MEMTODEV;
+		ch->req[1].rqtype = MEMTODEV;
+		ch->rqcfg.src_inc = 1;
+		ch->rqcfg.dst_inc = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto devcfg_exit;
+	}
+
+	ch->sdaddr = address;
+
+devcfg_exit:
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_devconfig);
+
+int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
+{
+	struct s3c_pl330_chan *ch = id_to_chan(id);
+	struct pl330_chanstatus status;
+	int ret;
+
+	if (!ch || chan_free(ch))
+		return -EINVAL;
+
+	ret = pl330_chan_status(ch->pl330_chan_id, &status);
+	if (ret < 0)
+		return ret;
+
+	*src = status.src_addr;
+	*dst = status.dst_addr;
+
+	return 0;
+}
+EXPORT_SYMBOL(s3c2410_dma_getposition);
+
+static irqreturn_t pl330_irq_handler(int irq, void *data)
+{
+	if (pl330_update(data))
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
+static int pl330_probe(struct platform_device *pdev)
+{
+	struct s3c_pl330_dmac *s3c_pl330_dmac;
+	struct s3c_pl330_platdata *pl330pd;
+	struct pl330_info *pl330_info;
+	struct resource *res;
+	int i, ret, irq;
+
+	pl330pd = pdev->dev.platform_data;
+
+	/* Can't do without the list of _32_ peripherals */
+	if (!pl330pd || !pl330pd->peri) {
+		dev_err(&pdev->dev, "platform data missing!\n");
+		return -ENODEV;
+	}
+
+	pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
+	if (!pl330_info)
+		return -ENOMEM;
+
+	pl330_info->pl330_data = NULL;
+	pl330_info->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		goto probe_err1;
+	}
+
+	request_mem_region(res->start, resource_size(res), pdev->name);
+
+	pl330_info->base = ioremap(res->start, resource_size(res));
+	if (!pl330_info->base) {
+		ret = -ENXIO;
+		goto probe_err2;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto probe_err3;
+	}
+
+	ret = request_irq(irq, pl330_irq_handler, 0,
+			dev_name(&pdev->dev), pl330_info);
+	if (ret)
+		goto probe_err4;
+
+	/* Allocate a new DMAC */
+	s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
+	if (!s3c_pl330_dmac) {
+		ret = -ENOMEM;
+		goto probe_err5;
+	}
+
+	/* Get operation clock and enable it */
+	s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
+	if (IS_ERR(s3c_pl330_dmac->clk)) {
+		dev_err(&pdev->dev, "Cannot get operation clock.\n");
+		ret = -EINVAL;
+		goto probe_err6;
+	}
+	clk_enable(s3c_pl330_dmac->clk);
+
+	ret = pl330_add(pl330_info);
+	if (ret)
+		goto probe_err7;
+
+	/* Hook the info */
+	s3c_pl330_dmac->pi = pl330_info;
+
+	/* No busy channels */
+	s3c_pl330_dmac->busy_chan = 0;
+
+	s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
+				sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
+
+	if (!s3c_pl330_dmac->kmcache) {
+		ret = -ENOMEM;
+		goto probe_err8;
+	}
+
+	/* Get the list of peripherals */
+	s3c_pl330_dmac->peri = pl330pd->peri;
+
+	/* Attach to the list of DMACs */
+	list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
+
+	/* Create a channel for each peripheral in the DMAC
+	 * that is, if it doesn't already exist
+	 */
+	for (i = 0; i < PL330_MAX_PERI; i++)
+		if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
+			chan_add(s3c_pl330_dmac->peri[i]);
+
+	printk(KERN_INFO
+		"Loaded driver for PL330 DMAC-%d %s\n",	pdev->id, pdev->name);
+	printk(KERN_INFO
+		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
+		pl330_info->pcfg.data_buf_dep,
+		pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
+		pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
+
+	return 0;
+
+probe_err8:
+	pl330_del(pl330_info);
+probe_err7:
+	clk_disable(s3c_pl330_dmac->clk);
+	clk_put(s3c_pl330_dmac->clk);
+probe_err6:
+	kfree(s3c_pl330_dmac);
+probe_err5:
+	free_irq(irq, pl330_info);
+probe_err4:
+probe_err3:
+	iounmap(pl330_info->base);
+probe_err2:
+	release_mem_region(res->start, resource_size(res));
+probe_err1:
+	kfree(pl330_info);
+
+	return ret;
+}
+
+static int pl330_remove(struct platform_device *pdev)
+{
+	struct s3c_pl330_dmac *dmac, *d;
+	struct s3c_pl330_chan *ch;
+	unsigned long flags;
+	int del, found;
+
+	if (!pdev->dev.platform_data)
+		return -EINVAL;
+
+	spin_lock_irqsave(&res_lock, flags);
+
+	found = 0;
+	list_for_each_entry(d, &dmac_list, node)
+		if (d->pi->dev == &pdev->dev) {
+			found = 1;
+			break;
+		}
+
+	if (!found) {
+		spin_unlock_irqrestore(&res_lock, flags);
+		return 0;
+	}
+
+	dmac = d;
+
+	/* Remove all Channels that are managed only by this DMAC */
+	list_for_each_entry(ch, &chan_list, node) {
+
+		/* Only channels that are handled by this DMAC */
+		if (iface_of_dmac(dmac, ch->id))
+			del = 1;
+		else
+			continue;
+
+		/* Don't remove if some other DMAC has it too */
+		list_for_each_entry(d, &dmac_list, node)
+			if (d != dmac && iface_of_dmac(d, ch->id)) {
+				del = 0;
+				break;
+			}
+
+		if (del) {
+			spin_unlock_irqrestore(&res_lock, flags);
+			s3c2410_dma_free(ch->id, ch->client);
+			spin_lock_irqsave(&res_lock, flags);
+			list_del(&ch->node);
+			kfree(ch);
+		}
+	}
+
+	/* Disable operation clock */
+	clk_disable(dmac->clk);
+	clk_put(dmac->clk);
+
+	/* Remove the DMAC */
+	list_del(&dmac->node);
+	kfree(dmac);
+
+	spin_unlock_irqrestore(&res_lock, flags);
+
+	return 0;
+}
+
+static struct platform_driver pl330_driver = {
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "s3c-pl330",
+	},
+	.probe		= pl330_probe,
+	.remove		= pl330_remove,
+};
+
+static int __init pl330_init(void)
+{
+	return platform_driver_register(&pl330_driver);
+}
+module_init(pl330_init);
+
+static void __exit pl330_exit(void)
+{
+	platform_driver_unregister(&pl330_driver);
+	return;
+}
+module_exit(pl330_exit);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
+MODULE_LICENSE("GPL");