[2/2] drm/lima: driver for ARM Mali4xx GPUs

Message ID 20190206131457.1072-3-yuq825@gmail.com (mailing list archive)
State New, archived
Series Lima DRM driver

Commit Message

Qiang Yu Feb. 6, 2019, 1:14 p.m. UTC
From: Lima Project Developers <lima@lists.freedesktop.org>

Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Marek Vasut <marex@denx.de>
Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
Signed-off-by: Qiang Yu <yuq825@gmail.com>
Signed-off-by: Simon Shields <simon@lineageos.org>
Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
---
 drivers/gpu/drm/Kconfig               |   2 +
 drivers/gpu/drm/Makefile              |   1 +
 drivers/gpu/drm/lima/Kconfig          |  10 +
 drivers/gpu/drm/lima/Makefile         |  22 ++
 drivers/gpu/drm/lima/lima_bcast.c     |  46 +++
 drivers/gpu/drm/lima/lima_bcast.h     |  14 +
 drivers/gpu/drm/lima/lima_ctx.c       | 124 +++++++
 drivers/gpu/drm/lima/lima_ctx.h       |  33 ++
 drivers/gpu/drm/lima/lima_device.c    | 384 ++++++++++++++++++++
 drivers/gpu/drm/lima/lima_device.h    | 116 ++++++
 drivers/gpu/drm/lima/lima_dlbu.c      |  56 +++
 drivers/gpu/drm/lima/lima_dlbu.h      |  18 +
 drivers/gpu/drm/lima/lima_drv.c       | 459 ++++++++++++++++++++++++
 drivers/gpu/drm/lima/lima_drv.h       |  59 ++++
 drivers/gpu/drm/lima/lima_gem.c       | 485 +++++++++++++++++++++++++
 drivers/gpu/drm/lima/lima_gem.h       |  25 ++
 drivers/gpu/drm/lima/lima_gem_prime.c | 144 ++++++++
 drivers/gpu/drm/lima/lima_gem_prime.h |  18 +
 drivers/gpu/drm/lima/lima_gp.c        | 280 +++++++++++++++
 drivers/gpu/drm/lima/lima_gp.h        |  16 +
 drivers/gpu/drm/lima/lima_l2_cache.c  |  79 +++++
 drivers/gpu/drm/lima/lima_l2_cache.h  |  14 +
 drivers/gpu/drm/lima/lima_mmu.c       | 135 +++++++
 drivers/gpu/drm/lima/lima_mmu.h       |  16 +
 drivers/gpu/drm/lima/lima_object.c    | 103 ++++++
 drivers/gpu/drm/lima/lima_object.h    |  72 ++++
 drivers/gpu/drm/lima/lima_pmu.c       |  61 ++++
 drivers/gpu/drm/lima/lima_pmu.h       |  12 +
 drivers/gpu/drm/lima/lima_pp.c        | 419 ++++++++++++++++++++++
 drivers/gpu/drm/lima/lima_pp.h        |  19 +
 drivers/gpu/drm/lima/lima_regs.h      | 298 ++++++++++++++++
 drivers/gpu/drm/lima/lima_sched.c     | 486 ++++++++++++++++++++++++++
 drivers/gpu/drm/lima/lima_sched.h     | 108 ++++++
 drivers/gpu/drm/lima/lima_ttm.c       | 319 +++++++++++++++++
 drivers/gpu/drm/lima/lima_ttm.h       |  24 ++
 drivers/gpu/drm/lima/lima_vm.c        | 354 +++++++++++++++++++
 drivers/gpu/drm/lima/lima_vm.h        |  59 ++++
 include/uapi/drm/lima_drm.h           | 193 ++++++++++
 38 files changed, 5083 insertions(+)
 create mode 100644 drivers/gpu/drm/lima/Kconfig
 create mode 100644 drivers/gpu/drm/lima/Makefile
 create mode 100644 drivers/gpu/drm/lima/lima_bcast.c
 create mode 100644 drivers/gpu/drm/lima/lima_bcast.h
 create mode 100644 drivers/gpu/drm/lima/lima_ctx.c
 create mode 100644 drivers/gpu/drm/lima/lima_ctx.h
 create mode 100644 drivers/gpu/drm/lima/lima_device.c
 create mode 100644 drivers/gpu/drm/lima/lima_device.h
 create mode 100644 drivers/gpu/drm/lima/lima_dlbu.c
 create mode 100644 drivers/gpu/drm/lima/lima_dlbu.h
 create mode 100644 drivers/gpu/drm/lima/lima_drv.c
 create mode 100644 drivers/gpu/drm/lima/lima_drv.h
 create mode 100644 drivers/gpu/drm/lima/lima_gem.c
 create mode 100644 drivers/gpu/drm/lima/lima_gem.h
 create mode 100644 drivers/gpu/drm/lima/lima_gem_prime.c
 create mode 100644 drivers/gpu/drm/lima/lima_gem_prime.h
 create mode 100644 drivers/gpu/drm/lima/lima_gp.c
 create mode 100644 drivers/gpu/drm/lima/lima_gp.h
 create mode 100644 drivers/gpu/drm/lima/lima_l2_cache.c
 create mode 100644 drivers/gpu/drm/lima/lima_l2_cache.h
 create mode 100644 drivers/gpu/drm/lima/lima_mmu.c
 create mode 100644 drivers/gpu/drm/lima/lima_mmu.h
 create mode 100644 drivers/gpu/drm/lima/lima_object.c
 create mode 100644 drivers/gpu/drm/lima/lima_object.h
 create mode 100644 drivers/gpu/drm/lima/lima_pmu.c
 create mode 100644 drivers/gpu/drm/lima/lima_pmu.h
 create mode 100644 drivers/gpu/drm/lima/lima_pp.c
 create mode 100644 drivers/gpu/drm/lima/lima_pp.h
 create mode 100644 drivers/gpu/drm/lima/lima_regs.h
 create mode 100644 drivers/gpu/drm/lima/lima_sched.c
 create mode 100644 drivers/gpu/drm/lima/lima_sched.h
 create mode 100644 drivers/gpu/drm/lima/lima_ttm.c
 create mode 100644 drivers/gpu/drm/lima/lima_ttm.h
 create mode 100644 drivers/gpu/drm/lima/lima_vm.c
 create mode 100644 drivers/gpu/drm/lima/lima_vm.h
 create mode 100644 include/uapi/drm/lima_drm.h

Comments

Eric Anholt Feb. 6, 2019, 7:17 p.m. UTC | #1
Qiang Yu <yuq825@gmail.com> writes:

> From: Lima Project Developers <lima@lists.freedesktop.org>
>
> Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
> Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
> Signed-off-by: Heiko Stuebner <heiko@sntech.de>
> Signed-off-by: Marek Vasut <marex@denx.de>
> Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
> Signed-off-by: Qiang Yu <yuq825@gmail.com>
> Signed-off-by: Simon Shields <simon@lineageos.org>
> Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
> ---

Some comments to follow.  Of them, the integer overflow and flags checks
definitely need fixing, I strongly recommend changing your timeout
handling, and would not block on any of my other suggestions.

> diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
> new file mode 100644
> index 000000000000..724ac4051f7a
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_ctx.c
> @@ -0,0 +1,124 @@
> +// SPDX-License-Identifier: GPL-2.0 OR MIT
> +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#include <linux/slab.h>
> +
> +#include "lima_device.h"
> +#include "lima_ctx.h"
> +
> +int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
> +{
> +	struct lima_ctx *ctx;
> +	int i, err;
> +
> +	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
> +	if (!ctx)
> +		return -ENOMEM;
> +	ctx->dev = dev;
> +	kref_init(&ctx->refcnt);
> +
> +	for (i = 0; i < lima_pipe_num; i++) {
> +		err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
> +		if (err)
> +			goto err_out0;
> +	}
> +
> +	idr_preload(GFP_KERNEL);
> +	spin_lock(&mgr->lock);
> +	err = idr_alloc(&mgr->handles, ctx, 1, 0, GFP_ATOMIC);
> +	spin_unlock(&mgr->lock);
> +	idr_preload_end();
> +	if (err < 0)
> +		goto err_out0;

You might enjoy using the new xa_alloc() api instead of idrs.
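
For illustration, a rough sketch with an xarray (xa_limit-flavoured
xa_alloc(); the mgr->lock/idr_preload() dance goes away because the
xarray takes its own lock internally):

  /* struct lima_ctx_mgr would hold a struct xarray 'handles',
   * initialized with xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC1)
   * so IDs start at 1 like the idr_alloc() above.
   */
  err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL);
  if (err < 0)
          goto err_out0;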

> +static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +	struct drm_lima_gem_submit_in *args = data;
> +	struct lima_device *ldev = to_lima_dev(dev);
> +	struct lima_drm_priv *priv = file->driver_priv;
> +	struct drm_lima_gem_submit_bo *bos;
> +	struct ttm_validate_buffer *vbs;
> +	union drm_lima_gem_submit_dep *deps = NULL;
> +	struct lima_sched_pipe *pipe;
> +	struct lima_sched_task *task;
> +	struct lima_ctx *ctx;
> +	struct lima_submit submit = {0};
> +	int err = 0, size;
> +
> +	if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
> +		return -EINVAL;
> +
> +	if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE |
> +			    LIMA_SUBMIT_FLAG_SYNC_FD_OUT))
> +		return -EINVAL;
> +
> +	pipe = ldev->pipe + args->pipe;
> +	if (args->frame_size != pipe->frame_size)
> +		return -EINVAL;
> +
> +	size = args->nr_bos * (sizeof(*submit.bos) + sizeof(*submit.vbs)) +
> +		args->nr_deps * sizeof(*submit.deps);

Needs checking for integer overflow with user-submitted args here.

(Having done overflow math for the equivalent in vc4, I'd say: don't
bother, do more kmallocs.)
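
E.g. a sketch of the separate-allocations route (kvmalloc_array() does
the multiply-overflow check for you):

  bos = kvmalloc_array(args->nr_bos, sizeof(*submit.bos),
                       GFP_KERNEL | __GFP_ZERO);
  vbs = kvmalloc_array(args->nr_bos, sizeof(*submit.vbs),
                       GFP_KERNEL | __GFP_ZERO);
  deps = kvmalloc_array(args->nr_deps, sizeof(*submit.deps),
                        GFP_KERNEL | __GFP_ZERO);
  if (!bos || !vbs || (args->nr_deps && !deps)) {
          err = -ENOMEM;
          goto out0;      /* and kvfree() all three there */
  }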

> +	bos = kzalloc(size, GFP_KERNEL);
> +	if (!bos)
> +		return -ENOMEM;
> +
> +	size = args->nr_bos * sizeof(*submit.bos);
> +	if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
> +		err = -EFAULT;
> +		goto out0;
> +	}
> +
> +	vbs = (void *)bos + size;
> +
> +	if (args->nr_deps) {
> +		deps = (void *)vbs + args->nr_bos * sizeof(*submit.vbs);
> +		size = args->nr_deps * sizeof(*submit.deps);
> +		if (copy_from_user(deps, u64_to_user_ptr(args->deps), size)) {
> +			err = -EFAULT;
> +			goto out0;
> +		}
> +	}


> +static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +	struct drm_lima_gem_wait *args = data;
> +
> +	if (!(args->op & (LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE)))
> +	    return -EINVAL;

I think you want if (args->op &
~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE)), so that you can be sure
userspace is doing the right thing today and you can extend with other
flags in the future.
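
i.e. something like this, keeping the at-least-one-flag requirement:

  if ((args->op & ~(LIMA_GEM_WAIT_READ | LIMA_GEM_WAIT_WRITE)) ||
      !(args->op & (LIMA_GEM_WAIT_READ | LIMA_GEM_WAIT_WRITE)))
          return -EINVAL;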

> +static int lima_ioctl_ctx(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +	struct drm_lima_ctx *args = data;
> +	struct lima_drm_priv *priv = file->driver_priv;
> +	struct lima_device *ldev = to_lima_dev(dev);
> +
> +	if (args->op == LIMA_CTX_OP_CREATE)
> +		return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
> +	else if (args->op == LIMA_CTX_OP_FREE)
> +		return lima_ctx_free(&priv->ctx_mgr, args->id);
> +
> +	return -EINVAL;
> +}

Overall UAPI suggestion: Having these muxing ioctls means that your
debug logs are harder to parse.  ioctls are already a mux based on the
command number, so just have separate ctx_create/ctx_free,
gem_get/gem_set ioctls, etc.
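
A hypothetical split (ioctl names invented here) is just more entries
in the ioctl table:

  DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create,
                    DRM_AUTH|DRM_RENDER_ALLOW),
  DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free,
                    DRM_AUTH|DRM_RENDER_ALLOW),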

> +static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
> +{
> +	int err;
> +	struct lima_drm_priv *priv;
> +	struct lima_device *ldev = to_lima_dev(dev);
> +
> +	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
> +	if (!priv)
> +		return -ENOMEM;
> +
> +	priv->vm = lima_vm_create(ldev);
> +	if (!priv->vm) {
> +		err = -ENOMEM;
> +		goto err_out0;
> +	}
> +
> +        lima_ctx_mgr_init(&priv->ctx_mgr);

whitespace issue


> +int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns)
> +{
> +	bool write = op & LIMA_GEM_WAIT_WRITE;
> +	struct drm_gem_object *obj;
> +	struct lima_bo *bo;
> +	signed long ret;
> +	unsigned long timeout;
> +
> +	obj = drm_gem_object_lookup(file, handle);
> +	if (!obj)
> +		return -ENOENT;
> +
> +	bo = to_lima_bo(obj);
> +
> +	timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;
> +
> +	ret = lima_bo_reserve(bo, true);
> +	if (ret)
> +		goto out;
> +
> +	/* must use long for result check because in 64bit arch int
> +	 * will overflow if timeout is too large and get <0 result
> +	 */
> +	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, write, true, timeout);
> +	if (ret == 0)
> +		ret = timeout ? -ETIMEDOUT : -EBUSY;
> +	else if (ret > 0)
> +		ret = 0;
> +
> +	lima_bo_unreserve(bo);
> +out:
> +	drm_gem_object_put_unlocked(obj);
> +	return ret;
> +}

From Documentation/botching-up-ioctls.txt:

 * For timeouts, use absolute times. If you're a good fellow and made your
   ioctl restartable relative timeouts tend to be too coarse and can
   indefinitely extend your wait time due to rounding on each restart.
   Especially if your reference clock is something really slow like the display
   frame counter. With a spec lawyer hat on this isn't a bug since timeouts can
   always be extended - but users will surely hate you if their neat animations
   starts to stutter due to this.

(I made v3d's timeouts relative, but decrement the timeout value the
user passed by how much I waited so that the timeout probably gets
reduced after a restartable signal.  I should have done absolute.)

> diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
> new file mode 100644
> index 000000000000..c44757b4be39
> --- /dev/null
> +++ b/include/uapi/drm/lima_drm.h

> +
> +#define LIMA_SUBMIT_DEP_FENCE   0x00
> +#define LIMA_SUBMIT_DEP_SYNC_FD 0x01
> +
> +struct drm_lima_gem_submit_dep_fence {
> +	__u32 type;
> +	__u32 ctx;
> +	__u32 pipe;
> +	__u32 seq;
> +};
> +
> +struct drm_lima_gem_submit_dep_sync_fd {
> +	__u32 type;
> +	__u32 fd;
> +};
> +
> +union drm_lima_gem_submit_dep {
> +	__u32 type;
> +	struct drm_lima_gem_submit_dep_fence fence;
> +	struct drm_lima_gem_submit_dep_sync_fd sync_fd;
> +};

I've been using gem sync objects for exposing my fences in v3d.  You can
import/export fences from sync files into syncobjs, and then you don't
need a custom driver fence type in the uapi or your own ioctls for it if
the submit just takes syncobjs in and out.
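
E.g. the submit args could carry syncobj handles directly (field names
hypothetical):

  struct drm_lima_gem_submit_in {
          ...
          __u32 in_sync;   /* syncobj handle to wait on, 0 = none */
          __u32 out_sync;  /* syncobj handle signaled with the job
                            * fence, 0 = none */
  };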

> +#define LIMA_GEM_MOD_OP_GET 0
> +#define LIMA_GEM_MOD_OP_SET 1
> +
> +struct drm_lima_gem_mod {
> +	__u32 handle;      /* in */
> +	__u32 op;          /* in */
> +	__u64 modifier;    /* in/out */
> +};

I thought the whole idea with the DRI3 modifiers stuff was that the
kernel didn't need to store modifier metadata on buffers?  (And this
gets in the way of Vulkan modifiers support, from what I understand).
Do you actually need this ABI?
Qiang Yu Feb. 7, 2019, 8:27 a.m. UTC | #2
On Thu, Feb 7, 2019 at 3:17 AM Eric Anholt <eric@anholt.net> wrote:
>
> Qiang Yu <yuq825@gmail.com> writes:
>
> > From: Lima Project Developers <lima@lists.freedesktop.org>
> >
> > Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
> > Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
> > Signed-off-by: Heiko Stuebner <heiko@sntech.de>
> > Signed-off-by: Marek Vasut <marex@denx.de>
> > Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
> > Signed-off-by: Qiang Yu <yuq825@gmail.com>
> > Signed-off-by: Simon Shields <simon@lineageos.org>
> > Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
> > ---
>
> Some comments to follow.  Of them, the integer overflow and flags checks
> definitely need fixing, I strongly recommend changing your timeout
> handling, and would not block on any of my other suggestions.
Thanks for your kind and valuable suggestions, I'll fix the args
checks and leave some of the suggestions as future improvements.

> > +int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns)
> > +{
> > +     bool write = op & LIMA_GEM_WAIT_WRITE;
> > +     struct drm_gem_object *obj;
> > +     struct lima_bo *bo;
> > +     signed long ret;
> > +     unsigned long timeout;
> > +
> > +     obj = drm_gem_object_lookup(file, handle);
> > +     if (!obj)
> > +             return -ENOENT;
> > +
> > +     bo = to_lima_bo(obj);
> > +
> > +     timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;
> > +
> > +     ret = lima_bo_reserve(bo, true);
> > +     if (ret)
> > +             goto out;
> > +
> > +     /* must use long for result check because in 64bit arch int
> > +      * will overflow if timeout is too large and get <0 result
> > +      */
> > +     ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, write, true, timeout);
> > +     if (ret == 0)
> > +             ret = timeout ? -ETIMEDOUT : -EBUSY;
> > +     else if (ret > 0)
> > +             ret = 0;
> > +
> > +     lima_bo_unreserve(bo);
> > +out:
> > +     drm_gem_object_put_unlocked(obj);
> > +     return ret;
> > +}
>
> From Documentation/botching-up-ioctls.txt:
>
>  * For timeouts, use absolute times. If you're a good fellow and made your
>    ioctl restartable relative timeouts tend to be too coarse and can
>    indefinitely extend your wait time due to rounding on each restart.
>    Especially if your reference clock is something really slow like the display
>    frame counter. With a spec lawyer hat on this isn't a bug since timeouts can
>    always be extended - but users will surely hate you if their neat animations
>    starts to stutter due to this.
>
> (I made v3d's timeouts relative, but decrement the timeout value the
> user passed by how much I waited so that the timeout probably gets
> reduced after a restartable signal.  I should have done absolute.)
timeout_ns in lima is already absolute; it is converted to a relative
timeout in lima_timeout_to_jiffies. Is this what you want, or did I
misunderstand?

>
> > diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
> > new file mode 100644
> > index 000000000000..c44757b4be39
> > --- /dev/null
> > +++ b/include/uapi/drm/lima_drm.h
>
> > +
> > +#define LIMA_SUBMIT_DEP_FENCE   0x00
> > +#define LIMA_SUBMIT_DEP_SYNC_FD 0x01
> > +
> > +struct drm_lima_gem_submit_dep_fence {
> > +     __u32 type;
> > +     __u32 ctx;
> > +     __u32 pipe;
> > +     __u32 seq;
> > +};
> > +
> > +struct drm_lima_gem_submit_dep_sync_fd {
> > +     __u32 type;
> > +     __u32 fd;
> > +};
> > +
> > +union drm_lima_gem_submit_dep {
> > +     __u32 type;
> > +     struct drm_lima_gem_submit_dep_fence fence;
> > +     struct drm_lima_gem_submit_dep_sync_fd sync_fd;
> > +};
>
> I've been using gem sync objects for exposing my fences in v3d.  You can
> import/export fences from sync files into syncobjs, and then you don't
> need a custom driver fence type in the uapi or your own ioctls for it if
> the submit just takes syncobjs in and out.
Sounds good, I'll consider doing it this way.

>
> > +#define LIMA_GEM_MOD_OP_GET 0
> > +#define LIMA_GEM_MOD_OP_SET 1
> > +
> > +struct drm_lima_gem_mod {
> > +     __u32 handle;      /* in */
> > +     __u32 op;          /* in */
> > +     __u64 modifier;    /* in/out */
> > +};
>
> I thought the whole idea with the DRI3 modifiers stuff was that the
> kernel didn't need to store modifier metadata on buffers?  (And this
> gets in the way of Vulkan modifiers support, from what I understand).
> Do you actually need this ABI?
Just for old apps where there's no userspace modifier-sharing method
like the DRI3 modifiers, e.g. an old xserver.

Regards,
Qiang
Eric Anholt Feb. 7, 2019, 7:38 p.m. UTC | #3
Qiang Yu <yuq825@gmail.com> writes:

> On Thu, Feb 7, 2019 at 3:17 AM Eric Anholt <eric@anholt.net> wrote:
>>
>> Qiang Yu <yuq825@gmail.com> writes:
>> > +int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns)
>> > +{
>> > +     bool write = op & LIMA_GEM_WAIT_WRITE;
>> > +     struct drm_gem_object *obj;
>> > +     struct lima_bo *bo;
>> > +     signed long ret;
>> > +     unsigned long timeout;
>> > +
>> > +     obj = drm_gem_object_lookup(file, handle);
>> > +     if (!obj)
>> > +             return -ENOENT;
>> > +
>> > +     bo = to_lima_bo(obj);
>> > +
>> > +     timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;
>> > +
>> > +     ret = lima_bo_reserve(bo, true);
>> > +     if (ret)
>> > +             goto out;
>> > +
>> > +     /* must use long for result check because in 64bit arch int
>> > +      * will overflow if timeout is too large and get <0 result
>> > +      */
>> > +     ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, write, true, timeout);
>> > +     if (ret == 0)
>> > +             ret = timeout ? -ETIMEDOUT : -EBUSY;
>> > +     else if (ret > 0)
>> > +             ret = 0;
>> > +
>> > +     lima_bo_unreserve(bo);
>> > +out:
>> > +     drm_gem_object_put_unlocked(obj);
>> > +     return ret;
>> > +}
>>
>> From Documentation/botching-up-ioctls.txt:
>>
>>  * For timeouts, use absolute times. If you're a good fellow and made your
>>    ioctl restartable relative timeouts tend to be too coarse and can
>>    indefinitely extend your wait time due to rounding on each restart.
>>    Especially if your reference clock is something really slow like the display
>>    frame counter. With a spec lawyer hat on this isn't a bug since timeouts can
>>    always be extended - but users will surely hate you if their neat animations
>>    starts to stutter due to this.
>>
>> (I made v3d's timeouts relative, but decrement the timeout value the
>> user passed by how much I waited so that the timeout probably gets
>> reduced after a restartable signal.  I should have done absolute.)
> timeout_ns in lima is already an absolute one which will be converted to
> relative one in lima_timeout_to_jiffies, is this what you want or I miss
> understand?

Yeah, not sure how I missed the lima function, which is clearly doing
absolute.  Sorry!
Rob Herring via dri-devel Feb. 12, 2019, 3:46 p.m. UTC | #4
On Wed, Feb 6, 2019 at 7:16 AM Qiang Yu <yuq825@gmail.com> wrote:
>
> From: Lima Project Developers <lima@lists.freedesktop.org>

This should be a person (you).

> Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
> Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
> Signed-off-by: Heiko Stuebner <heiko@sntech.de>
> Signed-off-by: Marek Vasut <marex@denx.de>
> Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
> Signed-off-by: Qiang Yu <yuq825@gmail.com>

Being the submitter, your S-o-b should be last.

> Signed-off-by: Simon Shields <simon@lineageos.org>
> Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
> ---

> diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> index 4385f00e1d05..dfefcb393858 100644
> --- a/drivers/gpu/drm/Kconfig
> +++ b/drivers/gpu/drm/Kconfig
> @@ -333,6 +333,8 @@ source "drivers/gpu/drm/tve200/Kconfig"
>
>  source "drivers/gpu/drm/xen/Kconfig"
>
> +source "drivers/gpu/drm/lima/Kconfig"
> +
>  # Keep legacy drivers last
>
>  menuconfig DRM_LEGACY
> diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
> index ce8d1d384319..8d024b729902 100644
> --- a/drivers/gpu/drm/Makefile
> +++ b/drivers/gpu/drm/Makefile
> @@ -109,3 +109,4 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
>  obj-$(CONFIG_DRM_PL111) += pl111/
>  obj-$(CONFIG_DRM_TVE200) += tve200/
>  obj-$(CONFIG_DRM_XEN) += xen/
> +obj-$(CONFIG_DRM_LIMA)  += lima/

Not sure about this file, but normally these should be kept sorted.

> diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
> new file mode 100644
> index 000000000000..63754f6465ea
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_bcast.c
> @@ -0,0 +1,46 @@
> +// SPDX-License-Identifier: GPL-2.0 OR MIT
> +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#include <linux/io.h>
> +#include <linux/device.h>
> +
> +#include "lima_device.h"
> +#include "lima_bcast.h"
> +#include "lima_regs.h"
> +
> +#define bcast_write(reg, data) writel(data, ip->iomem + LIMA_BCAST_##reg)
> +#define bcast_read(reg) readl(ip->iomem + LIMA_BCAST_##reg)

There are 2 things about this I would change. First, pass in 'ip' to
the macro so it is clear in calling functions that ip is actually used.
Second, don't do token pasting. It is generally avoided in the kernel.
It makes grepping the source code harder and is a pointless
indirection.

If you do both of those, then these can be static inline functions
instead which are preferred because you get type checking.

Same comment applies to all the other register accessors.
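
For the bcast block that would look something like this (a sketch;
call sites then spell out the full register name):

  static inline void bcast_write(struct lima_ip *ip, u32 reg, u32 data)
  {
          writel(data, ip->iomem + reg);
  }

  static inline u32 bcast_read(struct lima_ip *ip, u32 reg)
  {
          return readl(ip->iomem + reg);
  }

  /* e.g. bcast_write(ip, LIMA_BCAST_BROADCAST_MASK, mask); */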


> +struct lima_ip {
> +       struct lima_device *dev;
> +       enum lima_ip_id id;
> +       bool present;
> +
> +       void __iomem *iomem;
> +       int irq;
> +
> +       union {
> +               /* gp/pp */
> +               bool async_reset;
> +               /* l2 cache */
> +               spinlock_t lock;

What happens when you need 2 elements for a sub-block? I'd make this a
struct pointer for each IP sub-block.
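
Something along these lines (sub-block struct names made up for
illustration):

  union {
          struct lima_gp_data *gp;              /* hypothetical */
          struct lima_l2_cache_data *l2_cache;  /* hypothetical */
  } data;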

> +       } data;
> +};
> +
> +enum lima_pipe_id {
> +       lima_pipe_gp,
> +       lima_pipe_pp,
> +       lima_pipe_num,
> +};
> +
> +struct lima_device {
> +       struct device *dev;
> +       struct drm_device *ddev;
> +       struct platform_device *pdev;
> +
> +       enum lima_gpu_id id;
> +       int num_pp;
> +
> +       void __iomem *iomem;
> +       struct clk *clk_bus;
> +       struct clk *clk_gpu;
> +       struct reset_control *reset;
> +       struct regulator *regulator;
> +
> +       struct lima_ip ip[lima_ip_num];
> +       struct lima_sched_pipe pipe[lima_pipe_num];
> +
> +       struct lima_mman mman;
> +
> +       struct lima_vm *empty_vm;
> +       uint64_t va_start;
> +       uint64_t va_end;
> +
> +       u32 *dlbu_cpu;
> +       dma_addr_t dlbu_dma;
> +};
> +
> +static inline struct lima_device *
> +to_lima_dev(struct drm_device *dev)
> +{
> +       return dev->dev_private;
> +}
> +
> +static inline struct lima_device *
> +ttm_to_lima_dev(struct ttm_bo_device *dev)
> +{
> +       return container_of(dev, struct lima_device, mman.bdev);
> +}
> +
> +int lima_device_init(struct lima_device *ldev);
> +void lima_device_fini(struct lima_device *ldev);
> +
> +const char *lima_ip_name(struct lima_ip *ip);
> +
> +#endif
> diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
> new file mode 100644
> index 000000000000..6697d4ddd887
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_dlbu.c
> @@ -0,0 +1,56 @@
> +// SPDX-License-Identifier: GPL-2.0 OR MIT
> +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#include <linux/io.h>
> +#include <linux/device.h>
> +
> +#include "lima_device.h"
> +#include "lima_dlbu.h"
> +#include "lima_vm.h"
> +#include "lima_regs.h"
> +
> +#define dlbu_write(reg, data) writel(data, ip->iomem + LIMA_DLBU_##reg)
> +#define dlbu_read(reg) readl(ip->iomem + LIMA_DLBU_##reg)
> +
> +void lima_dlbu_enable(struct lima_device *dev, int num_pp)
> +{
> +       struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
> +       struct lima_ip *ip = dev->ip + lima_ip_dlbu;
> +       int i, mask = 0;
> +
> +       for (i = 0; i < num_pp; i++) {
> +               struct lima_ip *pp = pipe->processor[i];
> +               mask |= 1 << (pp->id - lima_ip_pp0);
> +       }
> +
> +       dlbu_write(PP_ENABLE_MASK, mask);
> +}
> +
> +void lima_dlbu_disable(struct lima_device *dev)
> +{
> +       struct lima_ip *ip = dev->ip + lima_ip_dlbu;
> +       dlbu_write(PP_ENABLE_MASK, 0);
> +}
> +
> +void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
> +{
> +       dlbu_write(TLLIST_VBASEADDR, reg[0]);
> +       dlbu_write(FB_DIM, reg[1]);
> +       dlbu_write(TLLIST_CONF, reg[2]);
> +       dlbu_write(START_TILE_POS, reg[3]);
> +}
> +
> +int lima_dlbu_init(struct lima_ip *ip)
> +{
> +       struct lima_device *dev = ip->dev;
> +
> +       dlbu_write(MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
> +       dlbu_write(MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
> +
> +       return 0;
> +}
> +
> +void lima_dlbu_fini(struct lima_ip *ip)
> +{
> +
> +}
> diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
> new file mode 100644
> index 000000000000..60cba387cf30
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_dlbu.h
> @@ -0,0 +1,18 @@
> +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#ifndef __LIMA_DLBU_H__
> +#define __LIMA_DLBU_H__
> +
> +struct lima_ip;
> +struct lima_device;
> +
> +void lima_dlbu_enable(struct lima_device *dev, int num_pp);
> +void lima_dlbu_disable(struct lima_device *dev);
> +
> +void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
> +
> +int lima_dlbu_init(struct lima_ip *ip);
> +void lima_dlbu_fini(struct lima_ip *ip);
> +
> +#endif
> diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
> new file mode 100644
> index 000000000000..132071b9be9b
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_drv.c
> @@ -0,0 +1,459 @@
> +// SPDX-License-Identifier: GPL-2.0 OR MIT
> +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#include <linux/module.h>
> +#include <linux/of_platform.h>
> +#include <linux/log2.h>
> +#include <drm/drm_prime.h>
> +#include <drm/lima_drm.h>
> +
> +#include "lima_drv.h"
> +#include "lima_gem.h"
> +#include "lima_gem_prime.h"
> +#include "lima_vm.h"
> +
> +int lima_sched_timeout_ms = 0;
> +int lima_sched_max_tasks = 32;
> +int lima_max_mem = -1;
> +
> +MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
> +module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
> +
> +MODULE_PARM_DESC(sched_max_tasks, "max queued task num in a context (default 32)");
> +module_param_named(sched_max_tasks, lima_sched_max_tasks, int, 0444);
> +
> +MODULE_PARM_DESC(max_mem, "Max memory size in MB can be used (<0 = auto)");
> +module_param_named(max_mem, lima_max_mem, int, 0444);
> +
> +static int lima_ioctl_info(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_info *info = data;
> +       struct lima_device *ldev = to_lima_dev(dev);
> +
> +       switch (ldev->id) {
> +       case lima_gpu_mali400:
> +               info->gpu_id = LIMA_INFO_GPU_MALI400;
> +               break;
> +       case lima_gpu_mali450:
> +               info->gpu_id = LIMA_INFO_GPU_MALI450;
> +               break;
> +       default:
> +               return -ENODEV;
> +       }
> +       info->num_pp = ldev->pipe[lima_pipe_pp].num_processor;
> +       info->va_start = ldev->va_start;
> +       info->va_end = ldev->va_end;
> +       return 0;
> +}
> +
> +static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_gem_create *args = data;
> +
> +       if (args->flags)
> +               return -EINVAL;
> +
> +       if (args->size == 0)
> +               return -EINVAL;
> +
> +       return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
> +}
> +
> +static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_gem_info *args = data;
> +
> +       return lima_gem_mmap_offset(file, args->handle, &args->offset);
> +}
> +
> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_gem_va *args = data;
> +
> +       switch (args->op) {
> +       case LIMA_VA_OP_MAP:
> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> +       case LIMA_VA_OP_UNMAP:
> +               return lima_gem_va_unmap(file, args->handle, args->va);

These are mapping to GPU VA. Why not do that on GEM object creation or
import or when the objects are submitted with cmd queue as other
drivers do?

To put it another way, These ioctls look different than what other
drivers do. Why do you need to do things differently? My understanding
is best practice is to map and return the GPU offset when the GEM
object is created. This is what v3d does. I think Intel is moving to
that. And panfrost will do that.

> +       default:
> +               return -EINVAL;
> +       }
> +}
> +
> +static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_gem_submit_in *args = data;
> +       struct lima_device *ldev = to_lima_dev(dev);
> +       struct lima_drm_priv *priv = file->driver_priv;
> +       struct drm_lima_gem_submit_bo *bos;
> +       struct ttm_validate_buffer *vbs;
> +       union drm_lima_gem_submit_dep *deps = NULL;
> +       struct lima_sched_pipe *pipe;
> +       struct lima_sched_task *task;
> +       struct lima_ctx *ctx;
> +       struct lima_submit submit = {0};
> +       int err = 0, size;
> +
> +       if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
> +               return -EINVAL;
> +
> +       if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE |
> +                           LIMA_SUBMIT_FLAG_SYNC_FD_OUT))
> +               return -EINVAL;
> +
> +       pipe = ldev->pipe + args->pipe;
> +       if (args->frame_size != pipe->frame_size)
> +               return -EINVAL;
> +
> +       size = args->nr_bos * (sizeof(*submit.bos) + sizeof(*submit.vbs)) +
> +               args->nr_deps * sizeof(*submit.deps);
> +       bos = kzalloc(size, GFP_KERNEL);
> +       if (!bos)
> +               return -ENOMEM;
> +
> +       size = args->nr_bos * sizeof(*submit.bos);
> +       if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
> +               err = -EFAULT;
> +               goto out0;
> +       }
> +
> +       vbs = (void *)bos + size;
> +
> +       if (args->nr_deps) {
> +               deps = (void *)vbs + args->nr_bos * sizeof(*submit.vbs);
> +               size = args->nr_deps * sizeof(*submit.deps);
> +               if (copy_from_user(deps, u64_to_user_ptr(args->deps), size)) {
> +                       err = -EFAULT;
> +                       goto out0;
> +               }
> +       }
> +
> +       task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
> +       if (!task) {
> +               err = -ENOMEM;
> +               goto out0;
> +       }
> +
> +       task->frame = task + 1;
> +       if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
> +               err = -EFAULT;
> +               goto out1;
> +       }
> +
> +       err = pipe->task_validate(pipe, task);
> +       if (err)
> +               goto out1;
> +
> +       ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
> +       if (!ctx) {
> +               err = -ENOENT;
> +               goto out1;
> +       }
> +
> +       submit.pipe = args->pipe;
> +       submit.bos = bos;
> +       submit.vbs = vbs;
> +       submit.nr_bos = args->nr_bos;
> +       submit.task = task;
> +       submit.ctx = ctx;
> +       submit.deps = deps;
> +       submit.nr_deps = args->nr_deps;
> +       submit.flags = args->flags;
> +
> +       err = lima_gem_submit(file, &submit);
> +       if (!err) {
> +               struct drm_lima_gem_submit_out *out = data;
> +               out->fence = submit.fence;
> +               out->done = submit.done;
> +               out->sync_fd = submit.sync_fd;
> +       }
> +
> +       lima_ctx_put(ctx);
> +out1:
> +       if (err)
> +               kmem_cache_free(pipe->task_slab, task);
> +out0:
> +       kfree(bos);
> +       return err;
> +}
> +
> +static int lima_wait_fence(struct dma_fence *fence, u64 timeout_ns)
> +{
> +       signed long ret;
> +
> +       if (!timeout_ns)
> +               ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;

I think you can just call dma_fence_wait_timeout with a 0 timeout
below and remove this clause.
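
i.e. roughly (a sketch; dma_fence_wait_timeout() with a 0 timeout just
reports the current signaled state):

  timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;

  ret = dma_fence_wait_timeout(fence, true, timeout);
  if (ret == 0)
          ret = timeout ? -ETIMEDOUT : -EBUSY;
  else if (ret > 0)
          ret = 0;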

> +       else {
> +               unsigned long timeout = lima_timeout_to_jiffies(timeout_ns);
> +
> +               /* must use long for result check because in 64bit arch int
> +                * will overflow if timeout is too large and get <0 result
> +                */
> +               ret = dma_fence_wait_timeout(fence, true, timeout);
> +               if (ret == 0)
> +                       ret = timeout ? -ETIMEDOUT : -EBUSY;
> +               else if (ret > 0)
> +                       ret = 0;
> +       }

I suspect this could be made common, like the reservation object
waits.  However, I'm curious why lima needs this ioctl in the first
place when I don't see the same in other drivers.

> +
> +       return ret;
> +}
> +
> +static int lima_ioctl_wait_fence(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_wait_fence *args = data;
> +       struct lima_drm_priv *priv = file->driver_priv;
> +       struct dma_fence *fence;
> +       int err = 0;
> +
> +       fence = lima_ctx_get_native_fence(&priv->ctx_mgr, args->ctx,
> +                                         args->pipe, args->seq);
> +       if (IS_ERR(fence))
> +               return PTR_ERR(fence);
> +
> +       if (fence) {
> +               err = lima_wait_fence(fence, args->timeout_ns);
> +               args->error = fence->error;
> +               dma_fence_put(fence);
> +       }
> +       else
> +               args->error = 0;
> +
> +       return err;
> +}
> +
> +static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_gem_wait *args = data;
> +
> +       if (!(args->op & (LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE)))
> +           return -EINVAL;
> +
> +       return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
> +}
> +
> +static int lima_ioctl_ctx(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_ctx *args = data;
> +       struct lima_drm_priv *priv = file->driver_priv;
> +       struct lima_device *ldev = to_lima_dev(dev);
> +
> +       if (args->op == LIMA_CTX_OP_CREATE)
> +               return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
> +       else if (args->op == LIMA_CTX_OP_FREE)
> +               return lima_ctx_free(&priv->ctx_mgr, args->id);
> +
> +       return -EINVAL;
> +}
> +
> +static int lima_ioctl_gem_mod(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +       struct drm_lima_gem_mod *args = data;
> +
> +       if (args->op == LIMA_GEM_MOD_OP_GET)
> +               return lima_gem_get_modifier(file, args->handle, &args->modifier);
> +       else if (args->op == LIMA_GEM_MOD_OP_SET)
> +               return lima_gem_set_modifier(file, args->handle, args->modifier);
> +
> +       return -EINVAL;
> +}
> +
> +static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
> +{
> +       int err;
> +       struct lima_drm_priv *priv;
> +       struct lima_device *ldev = to_lima_dev(dev);
> +
> +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
> +       if (!priv)
> +               return -ENOMEM;
> +
> +       priv->vm = lima_vm_create(ldev);
> +       if (!priv->vm) {
> +               err = -ENOMEM;
> +               goto err_out0;
> +       }
> +
> +        lima_ctx_mgr_init(&priv->ctx_mgr);
> +
> +       file->driver_priv = priv;
> +       return 0;
> +
> +err_out0:
> +       kfree(priv);
> +       return err;
> +}
> +
> +static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
> +{
> +       struct lima_drm_priv *priv = file->driver_priv;
> +
> +       lima_ctx_mgr_fini(&priv->ctx_mgr);
> +       lima_vm_put(priv->vm);
> +       kfree(priv);
> +}
> +
> +static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
> +       DRM_IOCTL_DEF_DRV(LIMA_INFO, lima_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_GEM_VA, lima_ioctl_gem_va, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_WAIT_FENCE, lima_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_CTX, lima_ioctl_ctx, DRM_AUTH|DRM_RENDER_ALLOW),
> +       DRM_IOCTL_DEF_DRV(LIMA_GEM_MOD, lima_ioctl_gem_mod, DRM_AUTH|DRM_RENDER_ALLOW),
> +};
> +
> +static const struct file_operations lima_drm_driver_fops = {
> +       .owner              = THIS_MODULE,
> +       .open               = drm_open,
> +       .release            = drm_release,
> +       .unlocked_ioctl     = drm_ioctl,
> +#ifdef CONFIG_COMPAT
> +       .compat_ioctl       = drm_compat_ioctl,
> +#endif
> +       .mmap               = lima_gem_mmap,
> +};
> +
> +static struct drm_driver lima_drm_driver = {
> +       .driver_features    = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME,
> +       .open               = lima_drm_driver_open,
> +       .postclose          = lima_drm_driver_postclose,
> +       .ioctls             = lima_drm_driver_ioctls,
> +       .num_ioctls         = ARRAY_SIZE(lima_drm_driver_ioctls),
> +       .fops               = &lima_drm_driver_fops,
> +       .gem_free_object_unlocked = lima_gem_free_object,
> +       .gem_open_object    = lima_gem_object_open,
> +       .gem_close_object   = lima_gem_object_close,
> +       .name               = "lima",
> +       .desc               = "lima DRM",
> +       .date               = "20170325",

Perhaps this should be updated? TBH, I don't know why this is even useful.

> +       .major              = 1,
> +       .minor              = 0,
> +       .patchlevel         = 0,
> +
> +       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> +       .gem_prime_import   = drm_gem_prime_import,
> +       .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
> +       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> +       .gem_prime_export   = drm_gem_prime_export,

import and export don't have to be set if you use the defaults.

> +       .gem_prime_res_obj  = lima_gem_prime_res_obj,
> +       .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
> +       .gem_prime_vmap = lima_gem_prime_vmap,
> +       .gem_prime_vunmap = lima_gem_prime_vunmap,
> +       .gem_prime_mmap = lima_gem_prime_mmap,
> +};
> +

> +int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns)
> +{
> +       bool write = op & LIMA_GEM_WAIT_WRITE;
> +       struct drm_gem_object *obj;
> +       struct lima_bo *bo;
> +       signed long ret;
> +       unsigned long timeout;
> +
> +       obj = drm_gem_object_lookup(file, handle);
> +       if (!obj)
> +               return -ENOENT;
> +
> +       bo = to_lima_bo(obj);
> +
> +       timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;
> +
> +       ret = lima_bo_reserve(bo, true);
> +       if (ret)
> +               goto out;
> +
> +       /* must use long for result check because in 64bit arch int
> +        * will overflow if timeout is too large and get <0 result
> +        */
> +       ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, write, true, timeout);
> +       if (ret == 0)
> +               ret = timeout ? -ETIMEDOUT : -EBUSY;
> +       else if (ret > 0)
> +               ret = 0;

There's a helper I added for all this that should land in 5.1.
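
(Presumably drm_gem_reservation_object_wait(); with it, most of this
function collapses to a sketch like

  ret = drm_gem_reservation_object_wait(file, handle, write, timeout);

minus the explicit BO reserve/unreserve.)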

> +
> +       lima_bo_unreserve(bo);
> +out:
> +       drm_gem_object_put_unlocked(obj);
> +       return ret;
> +}
> +

> +static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
> +{
> +       struct lima_device *dev = ip->dev;
> +       int timeout;
> +
> +       if (!ip->data.async_reset)
> +               return 0;
> +
> +       for (timeout = 1000; timeout > 0; timeout--) {
> +               if (gp_read(INT_RAWSTAT) & LIMA_GP_IRQ_RESET_COMPLETED)
> +                       break;

Use readl_poll_timeout instead of writing your own. At least add a
udelay to the loop so the timing is fixed and not dependent on how
fast the code can run.
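
E.g. (a sketch; the 10us poll interval and 100ms timeout are
illustrative values):

  u32 stat;

  /* from <linux/iopoll.h>; sleeps between reads and bounds the wait */
  err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, stat,
                           stat & LIMA_GP_IRQ_RESET_COMPLETED,
                           10, 100 * 1000);
  if (err) {
          dev_err(dev->dev, "gp soft reset time out\n");
          return err;
  }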

> +       }
> +       if (!timeout) {
> +               dev_err(dev->dev, "gp soft reset time out\n");
> +               return -ETIMEDOUT;
> +       }
> +
> +       gp_write(INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
> +       gp_write(INT_MASK, LIMA_GP_IRQ_MASK_USED);
> +
> +       ip->data.async_reset = false;
> +       return 0;
> +}

> diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
> new file mode 100644
> index 000000000000..e7cdec720e5d
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_l2_cache.c
> @@ -0,0 +1,79 @@
> +// SPDX-License-Identifier: GPL-2.0 OR MIT
> +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#include <linux/io.h>
> +#include <linux/device.h>
> +
> +#include "lima_device.h"
> +#include "lima_l2_cache.h"
> +#include "lima_regs.h"
> +
> +#define l2_cache_write(reg, data) writel(data, ip->iomem + LIMA_L2_CACHE_##reg)
> +#define l2_cache_read(reg) readl(ip->iomem + LIMA_L2_CACHE_##reg)
> +
> +static int lima_l2_cache_wait_idle(struct lima_ip *ip)
> +{
> +       int timeout;
> +       struct lima_device *dev = ip->dev;
> +
> +       for (timeout = 100000; timeout > 0; timeout--) {
> +           if (!(l2_cache_read(STATUS) & LIMA_L2_CACHE_STATUS_COMMAND_BUSY))
> +               break;

Use readl_poll_timeout or variant.

> +       }
> +       if (!timeout) {
> +           dev_err(dev->dev, "l2 cache wait command timeout\n");
> +           return -ETIMEDOUT;
> +       }
> +       return 0;
> +}
> +
> +int lima_l2_cache_flush(struct lima_ip *ip)
> +{
> +       int ret;
> +
> +       spin_lock(&ip->data.lock);
> +       l2_cache_write(COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
> +       ret = lima_l2_cache_wait_idle(ip);
> +       spin_unlock(&ip->data.lock);
> +       return ret;
> +}
> +
> +int lima_l2_cache_init(struct lima_ip *ip)
> +{
> +       int i, err;
> +       u32 size;
> +       struct lima_device *dev = ip->dev;
> +
> +       /* l2_cache2 only exists when one of PP4-7 present */
> +       if (ip->id == lima_ip_l2_cache2) {
> +               for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
> +                       if (dev->ip[i].present)
> +                               break;
> +               }
> +               if (i > lima_ip_pp7)
> +                       return -ENODEV;
> +       }
> +
> +       spin_lock_init(&ip->data.lock);
> +
> +       size = l2_cache_read(SIZE);
> +       dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
> +                1 << (((size >> 16) & 0xff) - 10),
> +                1 << ((size >> 8) & 0xff),
> +                1 << (size & 0xff),
> +                1 << ((size >> 24) & 0xff));
> +
> +       err = lima_l2_cache_flush(ip);
> +       if (err)
> +               return err;
> +
> +       l2_cache_write(ENABLE, LIMA_L2_CACHE_ENABLE_ACCESS | LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
> +       l2_cache_write(MAX_READS, 0x1c);
> +
> +       return 0;
> +}
> +
> +void lima_l2_cache_fini(struct lima_ip *ip)
> +{
> +
> +}
> diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
> new file mode 100644
> index 000000000000..2ff91eafefbe
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_l2_cache.h
> @@ -0,0 +1,14 @@
> +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#ifndef __LIMA_L2_CACHE_H__
> +#define __LIMA_L2_CACHE_H__
> +
> +struct lima_ip;
> +
> +int lima_l2_cache_init(struct lima_ip *ip);
> +void lima_l2_cache_fini(struct lima_ip *ip);
> +
> +int lima_l2_cache_flush(struct lima_ip *ip);
> +
> +#endif
> diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
> new file mode 100644
> index 000000000000..234fb90a4285
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_mmu.c
> @@ -0,0 +1,135 @@
> +// SPDX-License-Identifier: GPL-2.0 OR MIT
> +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/device.h>
> +
> +#include "lima_device.h"
> +#include "lima_mmu.h"
> +#include "lima_vm.h"
> +#include "lima_object.h"
> +#include "lima_regs.h"
> +
> +#define mmu_write(reg, data) writel(data, ip->iomem + LIMA_MMU_##reg)
> +#define mmu_read(reg) readl(ip->iomem + LIMA_MMU_##reg)
> +
> +#define lima_mmu_send_command(command, condition)           \
> +({                                                          \
> +       int __timeout, __ret = 0;                            \
> +                                                            \
> +       mmu_write(COMMAND, command);                         \
> +       for (__timeout = 1000; __timeout > 0; __timeout--) { \
> +               if (condition)                               \
> +                       break;                               \
> +       }                                                    \
> +       if (!__timeout) {                                    \
> +               dev_err(dev->dev, "mmu command %x timeout\n", command); \
> +               __ret = -ETIMEDOUT;                          \
> +       }                                                    \
> +       __ret;                                               \
> +})
> +
> +static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
> +{
> +       struct lima_ip *ip = data;
> +       struct lima_device *dev = ip->dev;
> +       u32 status = mmu_read(INT_STATUS);
> +       struct lima_sched_pipe *pipe;
> +
> +       /* for shared irq case */
> +       if (!status)

Can status have masked irq's? If so, you should be masking out the
disabled irq bits.

> +               return IRQ_NONE;
> +
> +       if (status & LIMA_MMU_INT_PAGE_FAULT) {
> +               u32 fault = mmu_read(PAGE_FAULT_ADDR);
> +               dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
> +                       fault, LIMA_MMU_STATUS_BUS_ID(status),
> +                       status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
> +                       lima_ip_name(ip));
> +       }
> +
> +       if (status & LIMA_MMU_INT_READ_BUS_ERROR) {
> +               dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
> +       }
> +
> +       /* mask all interrupts before resume */
> +       mmu_write(INT_MASK, 0);
> +       mmu_write(INT_CLEAR, status);
> +
> +       pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
> +       lima_sched_pipe_mmu_error(pipe);
> +
> +       return IRQ_HANDLED;
> +}


> +
> +unsigned long lima_timeout_to_jiffies(u64 timeout_ns)

Create a common helper instead of copy-n-pasting this from other
drivers (etnaviv).

> +{
> +       unsigned long timeout_jiffies;
> +       ktime_t timeout;
> +
> +       /* clamp timeout if it's to large */
> +       if (((s64)timeout_ns) < 0)
> +               return MAX_SCHEDULE_TIMEOUT;
> +
> +       timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
> +       if (ktime_to_ns(timeout) < 0)
> +               return 0;
> +
> +       timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
> +       /*  clamp timeout to avoid unsigned-> signed overflow */
> +       if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
> +               return MAX_SCHEDULE_TIMEOUT;
> +
> +       return timeout_jiffies;
> +}
> +
> +void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
> +{
> +       if (pipe->error)
> +               schedule_work(&pipe->error_work);
> +       else {
> +               struct lima_sched_task *task = pipe->current_task;
> +
> +               pipe->task_fini(pipe);
> +               dma_fence_signal(task->fence);
> +       }
> +}

> diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
> new file mode 100644
> index 000000000000..a264f3ae83fe
> --- /dev/null
> +++ b/drivers/gpu/drm/lima/lima_vm.c
> @@ -0,0 +1,354 @@
> +// SPDX-License-Identifier: GPL-2.0 OR MIT
> +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> +
> +#include <linux/slab.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/interval_tree_generic.h>
> +
> +#include "lima_device.h"
> +#include "lima_vm.h"
> +#include "lima_object.h"
> +#include "lima_regs.h"
> +
> +struct lima_bo_va_mapping {
> +       struct list_head list;
> +       struct rb_node rb;
> +       uint32_t start;
> +       uint32_t last;
> +       uint32_t __subtree_last;
> +};
> +
> +struct lima_bo_va {
> +       struct list_head list;
> +       unsigned ref_count;
> +
> +       struct list_head mapping;
> +
> +       struct lima_vm *vm;
> +};
> +
> +#define LIMA_VM_PD_SHIFT 22
> +#define LIMA_VM_PT_SHIFT 12
> +#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
> +#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
> +
> +#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
> +#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
> +
> +#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
> +#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
> +#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
> +#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
> +
> +#define START(node) ((node)->start)
> +#define LAST(node) ((node)->last)
> +
> +INTERVAL_TREE_DEFINE(struct lima_bo_va_mapping, rb, uint32_t, __subtree_last,
> +                    START, LAST, static, lima_vm_it)
> +
> +#undef START
> +#undef LAST
> +
> +static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
> +{
> +       u32 addr;
> +
> +       for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
> +               u32 pbe = LIMA_PBE(addr);
> +               u32 bte = LIMA_BTE(addr);
> +               u32 *bt;
> +
> +               bt = lima_bo_kmap(vm->bts[pbe]);
> +               bt[bte] = 0;
> +       }
> +}
> +
> +static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
> +                                 u32 start, u32 end)
> +{
> +       u64 addr;
> +       int err, i = 0;
> +
> +       for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
> +               u32 pbe = LIMA_PBE(addr);
> +               u32 bte = LIMA_BTE(addr);
> +               u32 *bt;
> +
> +               if (vm->bts[pbe])
> +                       bt = lima_bo_kmap(vm->bts[pbe]);
> +               else {
> +                       struct lima_bo *bt_bo;
> +                       dma_addr_t *pts;
> +                       u32 *pd;
> +                       int j;
> +
> +                       bt_bo = lima_bo_create(
> +                               vm->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
> +                               0, ttm_bo_type_kernel,
> +                               NULL, vm->pd->tbo.resv);

I don't think using BOs for page tables buys you anything. You could
just use the kernel DMA API directly. See io-pgtable-arm-v7s.c for
inspiration. For panfrost, it's standard ARM format page tables so we
can just use the io-pgtable library.
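
E.g. (a sketch of the direct DMA API route, error handling elided):

  dma_addr_t bt_dma;
  u32 *bt = dma_alloc_coherent(dev->dev,
                  LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
                  &bt_dma, GFP_KERNEL);
  /* no BO, no kmap: bt is the CPU view, bt_dma feeds the PD entries */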

> +                       if (IS_ERR(bt_bo)) {
> +                               err = PTR_ERR(bt_bo);
> +                               goto err_out;
> +                       }
> +
> +                       bt = lima_bo_kmap(bt_bo);
> +                       if (IS_ERR(bt)) {
> +                               lima_bo_unref(bt_bo);
> +                               err = PTR_ERR(bt);
> +                               goto err_out;
> +                       }
> +                       memset(bt, 0, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT);
> +
> +                       vm->bts[pbe] = bt_bo;
> +                       pd = lima_bo_kmap(vm->pd);
> +                       pd += pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT;
> +                       pts = lima_bo_get_pages(bt_bo);
> +                       for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++)
> +                               *pd++ = *pts++ | LIMA_VM_FLAG_PRESENT;
> +               }
> +
> +               bt[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
> +       }
> +
> +       return 0;
> +
> +err_out:
> +       if (addr != start)
> +               lima_vm_unmap_page_table(vm, start, addr - 1);
> +       return err;
> +}
> +
> +static struct lima_bo_va *
> +lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
> +{
> +       struct lima_bo_va *bo_va, *ret = NULL;
> +
> +       list_for_each_entry(bo_va, &bo->va, list) {
> +               if (bo_va->vm == vm) {
> +                       ret = bo_va;
> +                       break;
> +               }
> +       }
> +
> +       return ret;
> +}
> +
> +int lima_vm_bo_map(struct lima_vm *vm, struct lima_bo *bo, u32 start)
> +{
> +       int err;
> +       struct lima_bo_va_mapping *it, *mapping;
> +       u32 end = start + bo->gem.size - 1;
> +       dma_addr_t *pages_dma = lima_bo_get_pages(bo);
> +       struct lima_bo_va *bo_va;
> +
> +       it = lima_vm_it_iter_first(&vm->va, start, end);
> +       if (it) {
> +               dev_dbg(bo->gem.dev->dev, "lima vm map va overlap %x-%x %x-%x\n",
> +                       start, end, it->start, it->last);
> +               return -EINVAL;
> +       }
> +
> +       mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
> +       if (!mapping)
> +               return -ENOMEM;
> +       mapping->start = start;
> +       mapping->last = end;

Why don't you use the drm_mm_XX APIs instead of writing your own?
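
E.g. (a sketch; assumes a struct drm_mm embedded in lima_vm --
drm_mm_reserve_node() already does the overlap check for a
caller-chosen range):

  struct drm_mm_node *node = &bo_va->node;  /* hypothetical field */

  node->start = start;
  node->size = bo->gem.size;
  err = drm_mm_reserve_node(&vm->mm, node); /* -ENOSPC on overlap */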

> +
> +       err = lima_vm_map_page_table(vm, pages_dma, start, end);
> +       if (err) {
> +               kfree(mapping);
> +               return err;
> +       }
> +
> +       lima_vm_it_insert(mapping, &vm->va);
> +
> +       bo_va = lima_vm_bo_find(vm, bo);
> +       list_add_tail(&mapping->list, &bo_va->mapping);
> +
> +       return 0;
> +}
kernel test robot via dri-devel Feb. 12, 2019, 4:23 p.m. UTC | #5
On Tue, Feb 12, 2019 at 10:53 AM Rob Herring via dri-devel
<dri-devel@lists.freedesktop.org> wrote:
>
> On Wed, Feb 6, 2019 at 7:16 AM Qiang Yu <yuq825@gmail.com> wrote:
> >
> > From: Lima Project Developers <lima@lists.freedesktop.org>
>
> This should be a person (you).
>
> > Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
> > Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
> > Signed-off-by: Heiko Stuebner <heiko@sntech.de>
> > Signed-off-by: Marek Vasut <marex@denx.de>
> > Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
> > Signed-off-by: Qiang Yu <yuq825@gmail.com>
>
> Being the submitter, your S-o-b should be last.
>
> > Signed-off-by: Simon Shields <simon@lineageos.org>
> > Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
> > ---
>
> > diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> > index 4385f00e1d05..dfefcb393858 100644
> > --- a/drivers/gpu/drm/Kconfig
> > +++ b/drivers/gpu/drm/Kconfig
> > @@ -333,6 +333,8 @@ source "drivers/gpu/drm/tve200/Kconfig"
> >
> >  source "drivers/gpu/drm/xen/Kconfig"
> >
> > +source "drivers/gpu/drm/lima/Kconfig"
> > +
> >  # Keep legacy drivers last
> >
> >  menuconfig DRM_LEGACY
> > diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
> > index ce8d1d384319..8d024b729902 100644
> > --- a/drivers/gpu/drm/Makefile
> > +++ b/drivers/gpu/drm/Makefile
> > @@ -109,3 +109,4 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
> >  obj-$(CONFIG_DRM_PL111) += pl111/
> >  obj-$(CONFIG_DRM_TVE200) += tve200/
> >  obj-$(CONFIG_DRM_XEN) += xen/
> > +obj-$(CONFIG_DRM_LIMA)  += lima/
>
> Not sure about this file, but normally these should be kept sorted.
>
> > diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
> > new file mode 100644
> > index 000000000000..63754f6465ea
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_bcast.c
> > @@ -0,0 +1,46 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_bcast.h"
> > +#include "lima_regs.h"
> > +
> > +#define bcast_write(reg, data) writel(data, ip->iomem + LIMA_BCAST_##reg)
> > +#define bcast_read(reg) readl(ip->iomem + LIMA_BCAST_##reg)
>
> There are 2 things about this I would change. Just pass in 'ip' to the
> macro so it is clear in calling functions that ip is actually used.
> Second, don't do token pasting. It is generally avoided in the kernel.
> It makes grepping the source code harder and is a pointless
> indirection.
>
> If you do both of those, then these can be static inline functions
> instead which are preferred because you get type checking.
>
> Same comment applies to all the other register accessors.
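>
> E.g. something like (untested sketch):
>
> static inline void bcast_write(struct lima_ip *ip, u32 reg, u32 data)
> {
>         writel(data, ip->iomem + reg);
> }
>
> static inline u32 bcast_read(struct lima_ip *ip, u32 reg)
> {
>         return readl(ip->iomem + reg);
> }
>
> with callers passing the full LIMA_BCAST_* offset, which also keeps the
> register names greppable.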
>
>
> > +struct lima_ip {
> > +       struct lima_device *dev;
> > +       enum lima_ip_id id;
> > +       bool present;
> > +
> > +       void __iomem *iomem;
> > +       int irq;
> > +
> > +       union {
> > +               /* gp/pp */
> > +               bool async_reset;
> > +               /* l2 cache */
> > +               spinlock_t lock;
>
> What happens when you need 2 elements for a sub-block? I'd make this a
> struct pointer for each IP sub-block.
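>
> Roughly (untested sketch; the struct name is made up):
>
> struct lima_ip_l2_cache {
>         spinlock_t lock;
>         /* room for a second l2-cache element later */
> };
>
> and then a "struct lima_ip_l2_cache *l2_cache;" member in the union.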
>
> > +       } data;
> > +};
> > +
> > +enum lima_pipe_id {
> > +       lima_pipe_gp,
> > +       lima_pipe_pp,
> > +       lima_pipe_num,
> > +};
> > +
> > +struct lima_device {
> > +       struct device *dev;
> > +       struct drm_device *ddev;
> > +       struct platform_device *pdev;
> > +
> > +       enum lima_gpu_id id;
> > +       int num_pp;
> > +
> > +       void __iomem *iomem;
> > +       struct clk *clk_bus;
> > +       struct clk *clk_gpu;
> > +       struct reset_control *reset;
> > +       struct regulator *regulator;
> > +
> > +       struct lima_ip ip[lima_ip_num];
> > +       struct lima_sched_pipe pipe[lima_pipe_num];
> > +
> > +       struct lima_mman mman;
> > +
> > +       struct lima_vm *empty_vm;
> > +       uint64_t va_start;
> > +       uint64_t va_end;
> > +
> > +       u32 *dlbu_cpu;
> > +       dma_addr_t dlbu_dma;
> > +};
> > +
> > +static inline struct lima_device *
> > +to_lima_dev(struct drm_device *dev)
> > +{
> > +       return dev->dev_private;
> > +}
> > +
> > +static inline struct lima_device *
> > +ttm_to_lima_dev(struct ttm_bo_device *dev)
> > +{
> > +       return container_of(dev, struct lima_device, mman.bdev);
> > +}
> > +
> > +int lima_device_init(struct lima_device *ldev);
> > +void lima_device_fini(struct lima_device *ldev);
> > +
> > +const char *lima_ip_name(struct lima_ip *ip);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
> > new file mode 100644
> > index 000000000000..6697d4ddd887
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_dlbu.c
> > @@ -0,0 +1,56 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_dlbu.h"
> > +#include "lima_vm.h"
> > +#include "lima_regs.h"
> > +
> > +#define dlbu_write(reg, data) writel(data, ip->iomem + LIMA_DLBU_##reg)
> > +#define dlbu_read(reg) readl(ip->iomem + LIMA_DLBU_##reg)
> > +
> > +void lima_dlbu_enable(struct lima_device *dev, int num_pp)
> > +{
> > +       struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
> > +       struct lima_ip *ip = dev->ip + lima_ip_dlbu;
> > +       int i, mask = 0;
> > +
> > +       for (i = 0; i < num_pp; i++) {
> > +               struct lima_ip *pp = pipe->processor[i];
> > +               mask |= 1 << (pp->id - lima_ip_pp0);
> > +       }
> > +
> > +       dlbu_write(PP_ENABLE_MASK, mask);
> > +}
> > +
> > +void lima_dlbu_disable(struct lima_device *dev)
> > +{
> > +       struct lima_ip *ip = dev->ip + lima_ip_dlbu;
> > +       dlbu_write(PP_ENABLE_MASK, 0);
> > +}
> > +
> > +void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
> > +{
> > +       dlbu_write(TLLIST_VBASEADDR, reg[0]);
> > +       dlbu_write(FB_DIM, reg[1]);
> > +       dlbu_write(TLLIST_CONF, reg[2]);
> > +       dlbu_write(START_TILE_POS, reg[3]);
> > +}
> > +
> > +int lima_dlbu_init(struct lima_ip *ip)
> > +{
> > +       struct lima_device *dev = ip->dev;
> > +
> > +       dlbu_write(MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
> > +       dlbu_write(MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
> > +
> > +       return 0;
> > +}
> > +
> > +void lima_dlbu_fini(struct lima_ip *ip)
> > +{
> > +
> > +}
> > diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
> > new file mode 100644
> > index 000000000000..60cba387cf30
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_dlbu.h
> > @@ -0,0 +1,18 @@
> > +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> > +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#ifndef __LIMA_DLBU_H__
> > +#define __LIMA_DLBU_H__
> > +
> > +struct lima_ip;
> > +struct lima_device;
> > +
> > +void lima_dlbu_enable(struct lima_device *dev, int num_pp);
> > +void lima_dlbu_disable(struct lima_device *dev);
> > +
> > +void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
> > +
> > +int lima_dlbu_init(struct lima_ip *ip);
> > +void lima_dlbu_fini(struct lima_ip *ip);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
> > new file mode 100644
> > index 000000000000..132071b9be9b
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_drv.c
> > @@ -0,0 +1,459 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/module.h>
> > +#include <linux/of_platform.h>
> > +#include <linux/log2.h>
> > +#include <drm/drm_prime.h>
> > +#include <drm/lima_drm.h>
> > +
> > +#include "lima_drv.h"
> > +#include "lima_gem.h"
> > +#include "lima_gem_prime.h"
> > +#include "lima_vm.h"
> > +
> > +int lima_sched_timeout_ms = 0;
> > +int lima_sched_max_tasks = 32;
> > +int lima_max_mem = -1;
> > +
> > +MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
> > +module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
> > +
> > +MODULE_PARM_DESC(sched_max_tasks, "max queued task num in a context (default 32)");
> > +module_param_named(sched_max_tasks, lima_sched_max_tasks, int, 0444);
> > +
> > +MODULE_PARM_DESC(max_mem, "Max memory size in MB can be used (<0 = auto)");
> > +module_param_named(max_mem, lima_max_mem, int, 0444);
> > +
> > +static int lima_ioctl_info(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_info *info = data;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +
> > +       switch (ldev->id) {
> > +       case lima_gpu_mali400:
> > +               info->gpu_id = LIMA_INFO_GPU_MALI400;
> > +               break;
> > +       case lima_gpu_mali450:
> > +               info->gpu_id = LIMA_INFO_GPU_MALI450;
> > +               break;
> > +       default:
> > +               return -ENODEV;
> > +       }
> > +       info->num_pp = ldev->pipe[lima_pipe_pp].num_processor;
> > +       info->va_start = ldev->va_start;
> > +       info->va_end = ldev->va_end;
> > +       return 0;
> > +}
> > +
> > +static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_create *args = data;
> > +
> > +       if (args->flags)
> > +               return -EINVAL;
> > +
> > +       if (args->size == 0)
> > +               return -EINVAL;
> > +
> > +       return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
> > +}
> > +
> > +static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_info *args = data;
> > +
> > +       return lima_gem_mmap_offset(file, args->handle, &args->offset);
> > +}
> > +
> > +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_va *args = data;
> > +
> > +       switch (args->op) {
> > +       case LIMA_VA_OP_MAP:
> > +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> > +       case LIMA_VA_OP_UNMAP:
> > +               return lima_gem_va_unmap(file, args->handle, args->va);
>
> These are mapping to GPU VA. Why not do that on GEM object creation or
> import or when the objects are submitted with cmd queue as other
> drivers do?
>
> To put it another way, these ioctls look different than what other
> drivers do. Why do you need to do things differently? My understanding
> is best practice is to map and return the GPU offset when the GEM
> object is created. This is what v3d does. I think Intel is moving to
> that. And panfrost will do that.

I think it would be a good idea to look at the amdgpu driver.  This
driver is heavily modeled after it.  Basically the GEM VA ioctl allows
userspace to manage per process (per fd really) virtual addresses.
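
Roughly, userspace drives it like this (illustrative sketch; the ioctl
macro name is an assumption based on the uapi header):

        struct drm_lima_gem_va args = {
                .handle = handle,
                .op = LIMA_VA_OP_MAP,
                .flags = 0,
                .va = va,       /* address picked by the userspace allocator */
        };

        ret = drmIoctl(fd, DRM_IOCTL_LIMA_GEM_VA, &args);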

Alex

>
> > +       default:
> > +               return -EINVAL;
> > +       }
> > +}
> > +
> > +static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_submit_in *args = data;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +       struct drm_lima_gem_submit_bo *bos;
> > +       struct ttm_validate_buffer *vbs;
> > +       union drm_lima_gem_submit_dep *deps = NULL;
> > +       struct lima_sched_pipe *pipe;
> > +       struct lima_sched_task *task;
> > +       struct lima_ctx *ctx;
> > +       struct lima_submit submit = {0};
> > +       int err = 0, size;
> > +
> > +       if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
> > +               return -EINVAL;
> > +
> > +       if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE |
> > +                           LIMA_SUBMIT_FLAG_SYNC_FD_OUT))
> > +               return -EINVAL;
> > +
> > +       pipe = ldev->pipe + args->pipe;
> > +       if (args->frame_size != pipe->frame_size)
> > +               return -EINVAL;
> > +
> > +       size = args->nr_bos * (sizeof(*submit.bos) + sizeof(*submit.vbs)) +
> > +               args->nr_deps * sizeof(*submit.deps);
> > +       bos = kzalloc(size, GFP_KERNEL);
> > +       if (!bos)
> > +               return -ENOMEM;
> > +
> > +       size = args->nr_bos * sizeof(*submit.bos);
> > +       if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
> > +               err = -EFAULT;
> > +               goto out0;
> > +       }
> > +
> > +       vbs = (void *)bos + size;
> > +
> > +       if (args->nr_deps) {
> > +               deps = (void *)vbs + args->nr_bos * sizeof(*submit.vbs);
> > +               size = args->nr_deps * sizeof(*submit.deps);
> > +               if (copy_from_user(deps, u64_to_user_ptr(args->deps), size)) {
> > +                       err = -EFAULT;
> > +                       goto out0;
> > +               }
> > +       }
> > +
> > +       task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
> > +       if (!task) {
> > +               err = -ENOMEM;
> > +               goto out0;
> > +       }
> > +
> > +       task->frame = task + 1;
> > +       if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
> > +               err = -EFAULT;
> > +               goto out1;
> > +       }
> > +
> > +       err = pipe->task_validate(pipe, task);
> > +       if (err)
> > +               goto out1;
> > +
> > +       ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
> > +       if (!ctx) {
> > +               err = -ENOENT;
> > +               goto out1;
> > +       }
> > +
> > +       submit.pipe = args->pipe;
> > +       submit.bos = bos;
> > +       submit.vbs = vbs;
> > +       submit.nr_bos = args->nr_bos;
> > +       submit.task = task;
> > +       submit.ctx = ctx;
> > +       submit.deps = deps;
> > +       submit.nr_deps = args->nr_deps;
> > +       submit.flags = args->flags;
> > +
> > +       err = lima_gem_submit(file, &submit);
> > +       if (!err) {
> > +               struct drm_lima_gem_submit_out *out = data;
> > +               out->fence = submit.fence;
> > +               out->done = submit.done;
> > +               out->sync_fd = submit.sync_fd;
> > +       }
> > +
> > +       lima_ctx_put(ctx);
> > +out1:
> > +       if (err)
> > +               kmem_cache_free(pipe->task_slab, task);
> > +out0:
> > +       kfree(bos);
> > +       return err;
> > +}
> > +
> > +static int lima_wait_fence(struct dma_fence *fence, u64 timeout_ns)
> > +{
> > +       signed long ret;
> > +
> > +       if (!timeout_ns)
> > +               ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
>
> I think you can just call dma_fence_wait_timeout with a 0 timeout
> below and remove this clause.
>
> > +       else {
> > +               unsigned long timeout = lima_timeout_to_jiffies(timeout_ns);
> > +
> > +               /* must use long for the result check: on a 64-bit arch
> > +                * an int would overflow to <0 if the timeout is too large
> > +                */
> > +               ret = dma_fence_wait_timeout(fence, true, timeout);
> > +               if (ret == 0)
> > +                       ret = timeout ? -ETIMEDOUT : -EBUSY;
> > +               else if (ret > 0)
> > +                       ret = 0;
> > +       }
>
> I suspect this could be common like reservation object waits. However,
> I'm curious why lima needs this ioctl in the first place when I don't
> see the same for other drivers.
>
> > +
> > +       return ret;
> > +}
> > +
> > +static int lima_ioctl_wait_fence(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_wait_fence *args = data;
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +       struct dma_fence *fence;
> > +       int err = 0;
> > +
> > +       fence = lima_ctx_get_native_fence(&priv->ctx_mgr, args->ctx,
> > +                                         args->pipe, args->seq);
> > +       if (IS_ERR(fence))
> > +               return PTR_ERR(fence);
> > +
> > +       if (fence) {
> > +               err = lima_wait_fence(fence, args->timeout_ns);
> > +               args->error = fence->error;
> > +               dma_fence_put(fence);
> > +       }
> > +       else
> > +               args->error = 0;
> > +
> > +       return err;
> > +}
> > +
> > +static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_wait *args = data;
> > +
> > +       if (!(args->op & (LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE)))
> > +           return -EINVAL;
> > +
> > +       return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
> > +}
> > +
> > +static int lima_ioctl_ctx(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_ctx *args = data;
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +
> > +       if (args->op == LIMA_CTX_OP_CREATE)
> > +               return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
> > +       else if (args->op == LIMA_CTX_OP_FREE)
> > +               return lima_ctx_free(&priv->ctx_mgr, args->id);
> > +
> > +       return -EINVAL;
> > +}
> > +
> > +static int lima_ioctl_gem_mod(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_mod *args = data;
> > +
> > +       if (args->op == LIMA_GEM_MOD_OP_GET)
> > +               return lima_gem_get_modifier(file, args->handle, &args->modifier);
> > +       else if (args->op == LIMA_GEM_MOD_OP_SET)
> > +               return lima_gem_set_modifier(file, args->handle, args->modifier);
> > +
> > +       return -EINVAL;
> > +}
> > +
> > +static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
> > +{
> > +       int err;
> > +       struct lima_drm_priv *priv;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +
> > +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
> > +       if (!priv)
> > +               return -ENOMEM;
> > +
> > +       priv->vm = lima_vm_create(ldev);
> > +       if (!priv->vm) {
> > +               err = -ENOMEM;
> > +               goto err_out0;
> > +       }
> > +
> > +        lima_ctx_mgr_init(&priv->ctx_mgr);
> > +
> > +       file->driver_priv = priv;
> > +       return 0;
> > +
> > +err_out0:
> > +       kfree(priv);
> > +       return err;
> > +}
> > +
> > +static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
> > +{
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +
> > +       lima_ctx_mgr_fini(&priv->ctx_mgr);
> > +       lima_vm_put(priv->vm);
> > +       kfree(priv);
> > +}
> > +
> > +static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
> > +       DRM_IOCTL_DEF_DRV(LIMA_INFO, lima_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_VA, lima_ioctl_gem_va, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_WAIT_FENCE, lima_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_CTX, lima_ioctl_ctx, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_MOD, lima_ioctl_gem_mod, DRM_AUTH|DRM_RENDER_ALLOW),
> > +};
> > +
> > +static const struct file_operations lima_drm_driver_fops = {
> > +       .owner              = THIS_MODULE,
> > +       .open               = drm_open,
> > +       .release            = drm_release,
> > +       .unlocked_ioctl     = drm_ioctl,
> > +#ifdef CONFIG_COMPAT
> > +       .compat_ioctl       = drm_compat_ioctl,
> > +#endif
> > +       .mmap               = lima_gem_mmap,
> > +};
> > +
> > +static struct drm_driver lima_drm_driver = {
> > +       .driver_features    = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME,
> > +       .open               = lima_drm_driver_open,
> > +       .postclose          = lima_drm_driver_postclose,
> > +       .ioctls             = lima_drm_driver_ioctls,
> > +       .num_ioctls         = ARRAY_SIZE(lima_drm_driver_ioctls),
> > +       .fops               = &lima_drm_driver_fops,
> > +       .gem_free_object_unlocked = lima_gem_free_object,
> > +       .gem_open_object    = lima_gem_object_open,
> > +       .gem_close_object   = lima_gem_object_close,
> > +       .name               = "lima",
> > +       .desc               = "lima DRM",
> > +       .date               = "20170325",
>
> Perhaps this should be updated? TBH, I don't know why this is even useful.
>
> > +       .major              = 1,
> > +       .minor              = 0,
> > +       .patchlevel         = 0,
> > +
> > +       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> > +       .gem_prime_import   = drm_gem_prime_import,
> > +       .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
> > +       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> > +       .gem_prime_export   = drm_gem_prime_export,
>
> import and export don't have to be set if you use the defaults.
>
> > +       .gem_prime_res_obj  = lima_gem_prime_res_obj,
> > +       .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
> > +       .gem_prime_vmap = lima_gem_prime_vmap,
> > +       .gem_prime_vunmap = lima_gem_prime_vunmap,
> > +       .gem_prime_mmap = lima_gem_prime_mmap,
> > +};
> > +
>
> > +int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns)
> > +{
> > +       bool write = op & LIMA_GEM_WAIT_WRITE;
> > +       struct drm_gem_object *obj;
> > +       struct lima_bo *bo;
> > +       signed long ret;
> > +       unsigned long timeout;
> > +
> > +       obj = drm_gem_object_lookup(file, handle);
> > +       if (!obj)
> > +               return -ENOENT;
> > +
> > +       bo = to_lima_bo(obj);
> > +
> > +       timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;
> > +
> > +       ret = lima_bo_reserve(bo, true);
> > +       if (ret)
> > +               goto out;
> > +
> > +       /* must use long for the result check: on a 64-bit arch
> > +        * an int would overflow to <0 if the timeout is too large
> > +        */
> > +       ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, write, true, timeout);
> > +       if (ret == 0)
> > +               ret = timeout ? -ETIMEDOUT : -EBUSY;
> > +       else if (ret > 0)
> > +               ret = 0;
>
> There's a helper I added for all this that should land in 5.1.
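>
> (Presumably drm_gem_reservation_object_wait(); with that, this whole
> function reduces to roughly
>
>         ret = drm_gem_reservation_object_wait(file, handle, write,
>                                               timeout);
>
> modulo the exact timeout-vs-busy error mapping.)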
>
> > +
> > +       lima_bo_unreserve(bo);
> > +out:
> > +       drm_gem_object_put_unlocked(obj);
> > +       return ret;
> > +}
> > +
>
> > +static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
> > +{
> > +       struct lima_device *dev = ip->dev;
> > +       int timeout;
> > +
> > +       if (!ip->data.async_reset)
> > +               return 0;
> > +
> > +       for (timeout = 1000; timeout > 0; timeout--) {
> > +               if (gp_read(INT_RAWSTAT) & LIMA_GP_IRQ_RESET_COMPLETED)
> > +                       break;
>
> Use readl_poll_timeout instead of writing your own. At least add a
> udelay to the loop so the timing is fixed and not dependent on how
> fast the code can run.
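>
> Untested sketch (the delay/timeout values are illustrative):
>
>         u32 stat;
>         int err;
>
>         err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, stat,
>                                  stat & LIMA_GP_IRQ_RESET_COMPLETED,
>                                  10, 100000);
>         if (err)
>                 return err;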
>
> > +       }
> > +       if (!timeout) {
> > +               dev_err(dev->dev, "gp soft reset time out\n");
> > +               return -ETIMEDOUT;
> > +       }
> > +
> > +       gp_write(INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
> > +       gp_write(INT_MASK, LIMA_GP_IRQ_MASK_USED);
> > +
> > +       ip->data.async_reset = false;
> > +       return 0;
> > +}
>
> > diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
> > new file mode 100644
> > index 000000000000..e7cdec720e5d
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_l2_cache.c
> > @@ -0,0 +1,79 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_l2_cache.h"
> > +#include "lima_regs.h"
> > +
> > +#define l2_cache_write(reg, data) writel(data, ip->iomem + LIMA_L2_CACHE_##reg)
> > +#define l2_cache_read(reg) readl(ip->iomem + LIMA_L2_CACHE_##reg)
> > +
> > +static int lima_l2_cache_wait_idle(struct lima_ip *ip)
> > +{
> > +       int timeout;
> > +       struct lima_device *dev = ip->dev;
> > +
> > +       for (timeout = 100000; timeout > 0; timeout--) {
> > +           if (!(l2_cache_read(STATUS) & LIMA_L2_CACHE_STATUS_COMMAND_BUSY))
> > +               break;
>
> Use readl_poll_timeout or variant.
>
> > +       }
> > +       if (!timeout) {
> > +           dev_err(dev->dev, "l2 cache wait command timeout\n");
> > +           return -ETIMEDOUT;
> > +       }
> > +       return 0;
> > +}
> > +
> > +int lima_l2_cache_flush(struct lima_ip *ip)
> > +{
> > +       int ret;
> > +
> > +       spin_lock(&ip->data.lock);
> > +       l2_cache_write(COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
> > +       ret = lima_l2_cache_wait_idle(ip);
> > +       spin_unlock(&ip->data.lock);
> > +       return ret;
> > +}
> > +
> > +int lima_l2_cache_init(struct lima_ip *ip)
> > +{
> > +       int i, err;
> > +       u32 size;
> > +       struct lima_device *dev = ip->dev;
> > +
> > +       /* l2_cache2 only exists when one of PP4-7 present */
> > +       if (ip->id == lima_ip_l2_cache2) {
> > +               for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
> > +                       if (dev->ip[i].present)
> > +                               break;
> > +               }
> > +               if (i > lima_ip_pp7)
> > +                       return -ENODEV;
> > +       }
> > +
> > +       spin_lock_init(&ip->data.lock);
> > +
> > +       size = l2_cache_read(SIZE);
> > +       dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
> > +                1 << (((size >> 16) & 0xff) - 10),
> > +                1 << ((size >> 8) & 0xff),
> > +                1 << (size & 0xff),
> > +                1 << ((size >> 24) & 0xff));
> > +
> > +       err = lima_l2_cache_flush(ip);
> > +       if (err)
> > +               return err;
> > +
> > +       l2_cache_write(ENABLE, LIMA_L2_CACHE_ENABLE_ACCESS | LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
> > +       l2_cache_write(MAX_READS, 0x1c);
> > +
> > +       return 0;
> > +}
> > +
> > +void lima_l2_cache_fini(struct lima_ip *ip)
> > +{
> > +
> > +}
> > diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
> > new file mode 100644
> > index 000000000000..2ff91eafefbe
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_l2_cache.h
> > @@ -0,0 +1,14 @@
> > +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#ifndef __LIMA_L2_CACHE_H__
> > +#define __LIMA_L2_CACHE_H__
> > +
> > +struct lima_ip;
> > +
> > +int lima_l2_cache_init(struct lima_ip *ip);
> > +void lima_l2_cache_fini(struct lima_ip *ip);
> > +
> > +int lima_l2_cache_flush(struct lima_ip *ip);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
> > new file mode 100644
> > index 000000000000..234fb90a4285
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_mmu.c
> > @@ -0,0 +1,135 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/interrupt.h>
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_mmu.h"
> > +#include "lima_vm.h"
> > +#include "lima_object.h"
> > +#include "lima_regs.h"
> > +
> > +#define mmu_write(reg, data) writel(data, ip->iomem + LIMA_MMU_##reg)
> > +#define mmu_read(reg) readl(ip->iomem + LIMA_MMU_##reg)
> > +
> > +#define lima_mmu_send_command(command, condition)           \
> > +({                                                          \
> > +       int __timeout, __ret = 0;                            \
> > +                                                            \
> > +       mmu_write(COMMAND, command);                         \
> > +       for (__timeout = 1000; __timeout > 0; __timeout--) { \
> > +               if (condition)                               \
> > +                       break;                               \
> > +       }                                                    \
> > +       if (!__timeout) {                                    \
> > +               dev_err(dev->dev, "mmu command %x timeout\n", command); \
> > +               __ret = -ETIMEDOUT;                          \
> > +       }                                                    \
> > +       __ret;                                               \
> > +})
> > +
> > +static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
> > +{
> > +       struct lima_ip *ip = data;
> > +       struct lima_device *dev = ip->dev;
> > +       u32 status = mmu_read(INT_STATUS);
> > +       struct lima_sched_pipe *pipe;
> > +
> > +       /* for shared irq case */
> > +       if (!status)
>
> Can status have masked IRQs? If so, you should be masking out the
> disabled IRQ bits.
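>
> I.e. something like this, assuming the mask register reads back the
> currently enabled bits:
>
>         u32 status = mmu_read(INT_STATUS) & mmu_read(INT_MASK);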
>
> > +               return IRQ_NONE;
> > +
> > +       if (status & LIMA_MMU_INT_PAGE_FAULT) {
> > +               u32 fault = mmu_read(PAGE_FAULT_ADDR);
> > +               dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
> > +                       fault, LIMA_MMU_STATUS_BUS_ID(status),
> > +                       status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
> > +                       lima_ip_name(ip));
> > +       }
> > +
> > +       if (status & LIMA_MMU_INT_READ_BUS_ERROR) {
> > +               dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
> > +       }
> > +
> > +       /* mask all interrupts before resume */
> > +       mmu_write(INT_MASK, 0);
> > +       mmu_write(INT_CLEAR, status);
> > +
> > +       pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
> > +       lima_sched_pipe_mmu_error(pipe);
> > +
> > +       return IRQ_HANDLED;
> > +}
>
>
> > +
> > +unsigned long lima_timeout_to_jiffies(u64 timeout_ns)
>
> Create a common helper instead of copy-n-pasting this from other
> drivers (etnaviv).
>
> > +{
> > +       unsigned long timeout_jiffies;
> > +       ktime_t timeout;
> > +
> > +       /* clamp timeout if it's too large */
> > +       if (((s64)timeout_ns) < 0)
> > +               return MAX_SCHEDULE_TIMEOUT;
> > +
> > +       timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
> > +       if (ktime_to_ns(timeout) < 0)
> > +               return 0;
> > +
> > +       timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
> > +       /* clamp timeout to avoid unsigned -> signed overflow */
> > +       if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
> > +               return MAX_SCHEDULE_TIMEOUT;
> > +
> > +       return timeout_jiffies;
> > +}
> > +
> > +void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
> > +{
> > +       if (pipe->error)
> > +               schedule_work(&pipe->error_work);
> > +       else {
> > +               struct lima_sched_task *task = pipe->current_task;
> > +
> > +               pipe->task_fini(pipe);
> > +               dma_fence_signal(task->fence);
> > +       }
> > +}
>
> > diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
> > new file mode 100644
> > index 000000000000..a264f3ae83fe
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_vm.c
> > @@ -0,0 +1,354 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/slab.h>
> > +#include <linux/dma-mapping.h>
> > +#include <linux/interval_tree_generic.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_vm.h"
> > +#include "lima_object.h"
> > +#include "lima_regs.h"
> > +
> > +struct lima_bo_va_mapping {
> > +       struct list_head list;
> > +       struct rb_node rb;
> > +       uint32_t start;
> > +       uint32_t last;
> > +       uint32_t __subtree_last;
> > +};
> > +
> > +struct lima_bo_va {
> > +       struct list_head list;
> > +       unsigned ref_count;
> > +
> > +       struct list_head mapping;
> > +
> > +       struct lima_vm *vm;
> > +};
> > +
> > +#define LIMA_VM_PD_SHIFT 22
> > +#define LIMA_VM_PT_SHIFT 12
> > +#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
> > +#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
> > +
> > +#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
> > +#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
> > +
> > +#define LIMA_PDE(va) ((va) >> LIMA_VM_PD_SHIFT)
> > +#define LIMA_PTE(va) (((va) & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
> > +#define LIMA_PBE(va) ((va) >> LIMA_VM_PB_SHIFT)
> > +#define LIMA_BTE(va) (((va) & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
> > +
> > +#define START(node) ((node)->start)
> > +#define LAST(node) ((node)->last)
> > +
> > +INTERVAL_TREE_DEFINE(struct lima_bo_va_mapping, rb, uint32_t, __subtree_last,
> > +                    START, LAST, static, lima_vm_it)
> > +
> > +#undef START
> > +#undef LAST
> > +
> > +static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
> > +{
> > +       u32 addr;
> > +
> > +       for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
> > +               u32 pbe = LIMA_PBE(addr);
> > +               u32 bte = LIMA_BTE(addr);
> > +               u32 *bt;
> > +
> > +               bt = lima_bo_kmap(vm->bts[pbe]);
> > +               bt[bte] = 0;
> > +       }
> > +}
> > +
> > +static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
> > +                                 u32 start, u32 end)
> > +{
> > +       u64 addr;
> > +       int err, i = 0;
> > +
> > +       for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
> > +               u32 pbe = LIMA_PBE(addr);
> > +               u32 bte = LIMA_BTE(addr);
> > +               u32 *bt;
> > +
> > +               if (vm->bts[pbe])
> > +                       bt = lima_bo_kmap(vm->bts[pbe]);
> > +               else {
> > +                       struct lima_bo *bt_bo;
> > +                       dma_addr_t *pts;
> > +                       u32 *pd;
> > +                       int j;
> > +
> > +                       bt_bo = lima_bo_create(
> > +                               vm->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
> > +                               0, ttm_bo_type_kernel,
> > +                               NULL, vm->pd->tbo.resv);
>
> I don't think using BOs for page tables buys you anything. You could
> just use the kernel DMA API directly. See io-pgtable-arm-v7s.c for
> inspiration. For panfrost, it's standard ARM format page tables so we
> can just use the io-pgtable library.
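>
> E.g. (untested sketch):
>
>         dma_addr_t bt_dma;
>         u32 *bt;
>
>         bt = dma_alloc_coherent(vm->dev->dev,
>                                 LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
>                                 &bt_dma, GFP_KERNEL | __GFP_ZERO);
>         if (!bt)
>                 return -ENOMEM;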
>
> > +                       if (IS_ERR(bt_bo)) {
> > +                               err = PTR_ERR(bt_bo);
> > +                               goto err_out;
> > +                       }
> > +
> > +                       bt = lima_bo_kmap(bt_bo);
> > +                       if (IS_ERR(bt)) {
> > +                               lima_bo_unref(bt_bo);
> > +                               err = PTR_ERR(bt);
> > +                               goto err_out;
> > +                       }
> > +                       memset(bt, 0, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT);
> > +
> > +                       vm->bts[pbe] = bt_bo;
> > +                       pd = lima_bo_kmap(vm->pd);
> > +                       pd += pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT;
> > +                       pts = lima_bo_get_pages(bt_bo);
> > +                       for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++)
> > +                               *pd++ = *pts++ | LIMA_VM_FLAG_PRESENT;
> > +               }
> > +
> > +               bt[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
> > +       }
> > +
> > +       return 0;
> > +
> > +err_out:
> > +       if (addr != start)
> > +               lima_vm_unmap_page_table(vm, start, addr - 1);
> > +       return err;
> > +}
> > +
> > +static struct lima_bo_va *
> > +lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
> > +{
> > +       struct lima_bo_va *bo_va, *ret = NULL;
> > +
> > +       list_for_each_entry(bo_va, &bo->va, list) {
> > +               if (bo_va->vm == vm) {
> > +                       ret = bo_va;
> > +                       break;
> > +               }
> > +       }
> > +
> > +       return ret;
> > +}
> > +
> > +int lima_vm_bo_map(struct lima_vm *vm, struct lima_bo *bo, u32 start)
> > +{
> > +       int err;
> > +       struct lima_bo_va_mapping *it, *mapping;
> > +       u32 end = start + bo->gem.size - 1;
> > +       dma_addr_t *pages_dma = lima_bo_get_pages(bo);
> > +       struct lima_bo_va *bo_va;
> > +
> > +       it = lima_vm_it_iter_first(&vm->va, start, end);
> > +       if (it) {
> > +               dev_dbg(bo->gem.dev->dev, "lima vm map va overlap %x-%x %x-%x\n",
> > +                       start, end, it->start, it->last);
> > +               return -EINVAL;
> > +       }
> > +
> > +       mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
> > +       if (!mapping)
> > +               return -ENOMEM;
> > +       mapping->start = start;
> > +       mapping->last = end;
>
> Why don't you use the drm_mm_XX APIs instead of writing your own?
>
> > +
> > +       err = lima_vm_map_page_table(vm, pages_dma, start, end);
> > +       if (err) {
> > +               kfree(mapping);
> > +               return err;
> > +       }
> > +
> > +       lima_vm_it_insert(mapping, &vm->va);
> > +
> > +       bo_va = lima_vm_bo_find(vm, bo);
> > +       list_add_tail(&mapping->list, &bo_va->mapping);
> > +
> > +       return 0;
> > +}
kernel test robot via dri-devel Feb. 12, 2019, 8:04 p.m. UTC | #6
On Tue, Feb 12, 2019 at 10:24 AM Alex Deucher <alexdeucher@gmail.com> wrote:
>
> On Tue, Feb 12, 2019 at 10:53 AM Rob Herring via dri-devel
> <dri-devel@lists.freedesktop.org> wrote:
> >
> > On Wed, Feb 6, 2019 at 7:16 AM Qiang Yu <yuq825@gmail.com> wrote:
> > >
> > > From: Lima Project Developers <lima@lists.freedesktop.org>

[...]

> > > +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> > > +{
> > > +       struct drm_lima_gem_va *args = data;
> > > +
> > > +       switch (args->op) {
> > > +       case LIMA_VA_OP_MAP:
> > > +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> > > +       case LIMA_VA_OP_UNMAP:
> > > +               return lima_gem_va_unmap(file, args->handle, args->va);
> >
> > These are mapping to GPU VA. Why not do that on GEM object creation or
> > import or when the objects are submitted with cmd queue as other
> > drivers do?
> >
> > To put it another way, these ioctls look different than what other
> > drivers do. Why do you need to do things differently? My understanding
> > is best practice is to map and return the GPU offset when the GEM
> > object is created. This is what v3d does. I think Intel is moving to
> > that. And panfrost will do that.
>
> I think it would be a good idea to look at the amdgpu driver.  This
> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
> userspace to manage per process (per fd really) virtual addresses.

Why do you want userspace to manage assigning VAs versus the kernel to
do so? Exposing that detail to userspace means the driver must support
a per process address space. Letting the kernel assign addresses means
it can either be a single address space or be a per process address
space. It seems to me more flexible to allow the kernel driver to
evolve without that ABI.

With any new driver in the kernel, the question is always which
existing one is the best model to follow. I don't think Intel, AMD or
Nouveau are good examples to follow because they have a lot of history
and legacy, are both GPU and DC, and have separate graphics memory
(except Intel I guess). The GPUs in ARM land have none of those
really. Looking thru freedreno, etnaviv, and v3d mostly, I see they
all have similar user ABIs. But they are all different based on what
driver they copied and how they've evolved. I know it's a big can of
worms, but it would be nice to have some alignment of ABIs. I know the
reasons why there isn't, but it's frustrating that 11 out of 60K IGT
tests will run. I don't think a common ABI matters much for the big 3,
but in the ARM zoo I think it does. At least if the interfaces are
kept similar, then having common code shared among the embedded GPUs
would be easier and writing some IGT shim for each driver would be
easier.


Rob
kernel test robot via dri-devel Feb. 13, 2019, 12:56 a.m. UTC | #7
On Tue, Feb 12, 2019 at 11:47 PM Rob Herring <robh@kernel.org> wrote:
>
> On Wed, Feb 6, 2019 at 7:16 AM Qiang Yu <yuq825@gmail.com> wrote:
> >
> > From: Lima Project Developers <lima@lists.freedesktop.org>
>
> This should be a person (you).
>
> > Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
> > Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
> > Signed-off-by: Heiko Stuebner <heiko@sntech.de>
> > Signed-off-by: Marek Vasut <marex@denx.de>
> > Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
> > Signed-off-by: Qiang Yu <yuq825@gmail.com>
>
> Being the submitter, your S-o-b should be last.
>
> > Signed-off-by: Simon Shields <simon@lineageos.org>
> > Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
> > ---
>
> > diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> > index 4385f00e1d05..dfefcb393858 100644
> > --- a/drivers/gpu/drm/Kconfig
> > +++ b/drivers/gpu/drm/Kconfig
> > @@ -333,6 +333,8 @@ source "drivers/gpu/drm/tve200/Kconfig"
> >
> >  source "drivers/gpu/drm/xen/Kconfig"
> >
> > +source "drivers/gpu/drm/lima/Kconfig"
> > +
> >  # Keep legacy drivers last
> >
> >  menuconfig DRM_LEGACY
> > diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
> > index ce8d1d384319..8d024b729902 100644
> > --- a/drivers/gpu/drm/Makefile
> > +++ b/drivers/gpu/drm/Makefile
> > @@ -109,3 +109,4 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
> >  obj-$(CONFIG_DRM_PL111) += pl111/
> >  obj-$(CONFIG_DRM_TVE200) += tve200/
> >  obj-$(CONFIG_DRM_XEN) += xen/
> > +obj-$(CONFIG_DRM_LIMA)  += lima/
>
> Not sure about this file, but normally these should be kept sorted.
>
> > diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
> > new file mode 100644
> > index 000000000000..63754f6465ea
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_bcast.c
> > @@ -0,0 +1,46 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_bcast.h"
> > +#include "lima_regs.h"
> > +
> > +#define bcast_write(reg, data) writel(data, ip->iomem + LIMA_BCAST_##reg)
> > +#define bcast_read(reg) readl(ip->iomem + LIMA_BCAST_##reg)
>
> There are 2 things about this I would change. Just pass in 'ip' to the
> macro so it is clear in calling functions that ip is actually used.
> Second, don't do token pasting. It is generally avoided in the kernel.
> It makes grepping the source code harder and is a pointless
> indirection.
>
> If you do both of those, then these can be static inline functions
> instead which are preferred because you get type checking.
>
> Same comment applies to all the other register accessors.
>
>
> > +struct lima_ip {
> > +       struct lima_device *dev;
> > +       enum lima_ip_id id;
> > +       bool present;
> > +
> > +       void __iomem *iomem;
> > +       int irq;
> > +
> > +       union {
> > +               /* gp/pp */
> > +               bool async_reset;
> > +               /* l2 cache */
> > +               spinlock_t lock;
>
> What happens when you need 2 elements for a sub-block? I'd make this a
> struct pointer for each IP sub-block.
Let's switch to a sub-block struct later if a second element comes along.

>
> > +       } data;
> > +};
> > +
> > +enum lima_pipe_id {
> > +       lima_pipe_gp,
> > +       lima_pipe_pp,
> > +       lima_pipe_num,
> > +};
> > +
> > +struct lima_device {
> > +       struct device *dev;
> > +       struct drm_device *ddev;
> > +       struct platform_device *pdev;
> > +
> > +       enum lima_gpu_id id;
> > +       int num_pp;
> > +
> > +       void __iomem *iomem;
> > +       struct clk *clk_bus;
> > +       struct clk *clk_gpu;
> > +       struct reset_control *reset;
> > +       struct regulator *regulator;
> > +
> > +       struct lima_ip ip[lima_ip_num];
> > +       struct lima_sched_pipe pipe[lima_pipe_num];
> > +
> > +       struct lima_mman mman;
> > +
> > +       struct lima_vm *empty_vm;
> > +       uint64_t va_start;
> > +       uint64_t va_end;
> > +
> > +       u32 *dlbu_cpu;
> > +       dma_addr_t dlbu_dma;
> > +};
> > +
> > +static inline struct lima_device *
> > +to_lima_dev(struct drm_device *dev)
> > +{
> > +       return dev->dev_private;
> > +}
> > +
> > +static inline struct lima_device *
> > +ttm_to_lima_dev(struct ttm_bo_device *dev)
> > +{
> > +       return container_of(dev, struct lima_device, mman.bdev);
> > +}
> > +
> > +int lima_device_init(struct lima_device *ldev);
> > +void lima_device_fini(struct lima_device *ldev);
> > +
> > +const char *lima_ip_name(struct lima_ip *ip);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
> > new file mode 100644
> > index 000000000000..6697d4ddd887
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_dlbu.c
> > @@ -0,0 +1,56 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_dlbu.h"
> > +#include "lima_vm.h"
> > +#include "lima_regs.h"
> > +
> > +#define dlbu_write(reg, data) writel(data, ip->iomem + LIMA_DLBU_##reg)
> > +#define dlbu_read(reg) readl(ip->iomem + LIMA_DLBU_##reg)
> > +
> > +void lima_dlbu_enable(struct lima_device *dev, int num_pp)
> > +{
> > +       struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
> > +       struct lima_ip *ip = dev->ip + lima_ip_dlbu;
> > +       int i, mask = 0;
> > +
> > +       for (i = 0; i < num_pp; i++) {
> > +               struct lima_ip *pp = pipe->processor[i];
> > +               mask |= 1 << (pp->id - lima_ip_pp0);
> > +       }
> > +
> > +       dlbu_write(PP_ENABLE_MASK, mask);
> > +}
> > +
> > +void lima_dlbu_disable(struct lima_device *dev)
> > +{
> > +       struct lima_ip *ip = dev->ip + lima_ip_dlbu;
> > +       dlbu_write(PP_ENABLE_MASK, 0);
> > +}
> > +
> > +void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
> > +{
> > +       dlbu_write(TLLIST_VBASEADDR, reg[0]);
> > +       dlbu_write(FB_DIM, reg[1]);
> > +       dlbu_write(TLLIST_CONF, reg[2]);
> > +       dlbu_write(START_TILE_POS, reg[3]);
> > +}
> > +
> > +int lima_dlbu_init(struct lima_ip *ip)
> > +{
> > +       struct lima_device *dev = ip->dev;
> > +
> > +       dlbu_write(MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
> > +       dlbu_write(MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
> > +
> > +       return 0;
> > +}
> > +
> > +void lima_dlbu_fini(struct lima_ip *ip)
> > +{
> > +
> > +}
> > diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
> > new file mode 100644
> > index 000000000000..60cba387cf30
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_dlbu.h
> > @@ -0,0 +1,18 @@
> > +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> > +/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#ifndef __LIMA_DLBU_H__
> > +#define __LIMA_DLBU_H__
> > +
> > +struct lima_ip;
> > +struct lima_device;
> > +
> > +void lima_dlbu_enable(struct lima_device *dev, int num_pp);
> > +void lima_dlbu_disable(struct lima_device *dev);
> > +
> > +void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
> > +
> > +int lima_dlbu_init(struct lima_ip *ip);
> > +void lima_dlbu_fini(struct lima_ip *ip);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
> > new file mode 100644
> > index 000000000000..132071b9be9b
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_drv.c
> > @@ -0,0 +1,459 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/module.h>
> > +#include <linux/of_platform.h>
> > +#include <linux/log2.h>
> > +#include <drm/drm_prime.h>
> > +#include <drm/lima_drm.h>
> > +
> > +#include "lima_drv.h"
> > +#include "lima_gem.h"
> > +#include "lima_gem_prime.h"
> > +#include "lima_vm.h"
> > +
> > +int lima_sched_timeout_ms = 0;
> > +int lima_sched_max_tasks = 32;
> > +int lima_max_mem = -1;
> > +
> > +MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
> > +module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
> > +
> > +MODULE_PARM_DESC(sched_max_tasks, "max queued task num in a context (default 32)");
> > +module_param_named(sched_max_tasks, lima_sched_max_tasks, int, 0444);
> > +
> > +MODULE_PARM_DESC(max_mem, "Max memory size in MB can be used (<0 = auto)");
> > +module_param_named(max_mem, lima_max_mem, int, 0444);
> > +
> > +static int lima_ioctl_info(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_info *info = data;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +
> > +       switch (ldev->id) {
> > +       case lima_gpu_mali400:
> > +               info->gpu_id = LIMA_INFO_GPU_MALI400;
> > +               break;
> > +       case lima_gpu_mali450:
> > +               info->gpu_id = LIMA_INFO_GPU_MALI450;
> > +               break;
> > +       default:
> > +               return -ENODEV;
> > +       }
> > +       info->num_pp = ldev->pipe[lima_pipe_pp].num_processor;
> > +       info->va_start = ldev->va_start;
> > +       info->va_end = ldev->va_end;
> > +       return 0;
> > +}
> > +
> > +static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_create *args = data;
> > +
> > +       if (args->flags)
> > +               return -EINVAL;
> > +
> > +       if (args->size == 0)
> > +               return -EINVAL;
> > +
> > +       return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
> > +}
> > +
> > +static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_info *args = data;
> > +
> > +       return lima_gem_mmap_offset(file, args->handle, &args->offset);
> > +}
> > +
> > +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_va *args = data;
> > +
> > +       switch (args->op) {
> > +       case LIMA_VA_OP_MAP:
> > +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> > +       case LIMA_VA_OP_UNMAP:
> > +               return lima_gem_va_unmap(file, args->handle, args->va);
>
> These are mapping to GPU VA. Why not do that on GEM object creation or
> import or when the objects are submitted with cmd queue as other
> drivers do?
>
> To put it another way, these ioctls look different than what other
> drivers do. Why do you need to do things differently? My understanding
> is best practice is to map and return the GPU offset when the GEM
> object is created. This is what v3d does. I think Intel is moving to
> that. And panfrost will do that.
>
> > +       default:
> > +               return -EINVAL;
> > +       }
> > +}
> > +
> > +static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_submit_in *args = data;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +       struct drm_lima_gem_submit_bo *bos;
> > +       struct ttm_validate_buffer *vbs;
> > +       union drm_lima_gem_submit_dep *deps = NULL;
> > +       struct lima_sched_pipe *pipe;
> > +       struct lima_sched_task *task;
> > +       struct lima_ctx *ctx;
> > +       struct lima_submit submit = {0};
> > +       int err = 0, size;
> > +
> > +       if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
> > +               return -EINVAL;
> > +
> > +       if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE |
> > +                           LIMA_SUBMIT_FLAG_SYNC_FD_OUT))
> > +               return -EINVAL;
> > +
> > +       pipe = ldev->pipe + args->pipe;
> > +       if (args->frame_size != pipe->frame_size)
> > +               return -EINVAL;
> > +
> > +       size = args->nr_bos * (sizeof(*submit.bos) + sizeof(*submit.vbs)) +
> > +               args->nr_deps * sizeof(*submit.deps);
> > +       bos = kzalloc(size, GFP_KERNEL);
> > +       if (!bos)
> > +               return -ENOMEM;
> > +
> > +       size = args->nr_bos * sizeof(*submit.bos);
> > +       if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
> > +               err = -EFAULT;
> > +               goto out0;
> > +       }
> > +
> > +       vbs = (void *)bos + size;
> > +
> > +       if (args->nr_deps) {
> > +               deps = (void *)vbs + args->nr_bos * sizeof(*submit.vbs);
> > +               size = args->nr_deps * sizeof(*submit.deps);
> > +               if (copy_from_user(deps, u64_to_user_ptr(args->deps), size)) {
> > +                       err = -EFAULT;
> > +                       goto out0;
> > +               }
> > +       }
> > +
> > +       task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
> > +       if (!task) {
> > +               err = -ENOMEM;
> > +               goto out0;
> > +       }
> > +
> > +       task->frame = task + 1;
> > +       if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
> > +               err = -EFAULT;
> > +               goto out1;
> > +       }
> > +
> > +       err = pipe->task_validate(pipe, task);
> > +       if (err)
> > +               goto out1;
> > +
> > +       ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
> > +       if (!ctx) {
> > +               err = -ENOENT;
> > +               goto out1;
> > +       }
> > +
> > +       submit.pipe = args->pipe;
> > +       submit.bos = bos;
> > +       submit.vbs = vbs;
> > +       submit.nr_bos = args->nr_bos;
> > +       submit.task = task;
> > +       submit.ctx = ctx;
> > +       submit.deps = deps;
> > +       submit.nr_deps = args->nr_deps;
> > +       submit.flags = args->flags;
> > +
> > +       err = lima_gem_submit(file, &submit);
> > +       if (!err) {
> > +               struct drm_lima_gem_submit_out *out = data;
> > +               out->fence = submit.fence;
> > +               out->done = submit.done;
> > +               out->sync_fd = submit.sync_fd;
> > +       }
> > +
> > +       lima_ctx_put(ctx);
> > +out1:
> > +       if (err)
> > +               kmem_cache_free(pipe->task_slab, task);
> > +out0:
> > +       kfree(bos);
> > +       return err;
> > +}
> > +
> > +static int lima_wait_fence(struct dma_fence *fence, u64 timeout_ns)
> > +{
> > +       signed long ret;
> > +
> > +       if (!timeout_ns)
> > +               ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
>
> I think you can just call dma_fence_wait_timeout with a 0 timeout
> below and remove this clause.
>
> > +       else {
> > +               unsigned long timeout = lima_timeout_to_jiffies(timeout_ns);
> > +
> > +               /* must use long for the result check: on a 64-bit arch
> > +                * an int would overflow to <0 if the timeout is too large
> > +                */
> > +               ret = dma_fence_wait_timeout(fence, true, timeout);
> > +               if (ret == 0)
> > +                       ret = timeout ? -ETIMEDOUT : -EBUSY;
> > +               else if (ret > 0)
> > +                       ret = 0;
> > +       }
>
> I suspect this could be common like reservation object waits. However,
> I'm curious why lima needs this ioctl in the first place when I don't
> see the same for other drivers.
I've prepared a v2 patch set which removes this ioctl by adopting
Eric's suggestion to use drm_syncobj.

>
> > +
> > +       return ret;
> > +}
> > +
> > +static int lima_ioctl_wait_fence(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_wait_fence *args = data;
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +       struct dma_fence *fence;
> > +       int err = 0;
> > +
> > +       fence = lima_ctx_get_native_fence(&priv->ctx_mgr, args->ctx,
> > +                                         args->pipe, args->seq);
> > +       if (IS_ERR(fence))
> > +               return PTR_ERR(fence);
> > +
> > +       if (fence) {
> > +               err = lima_wait_fence(fence, args->timeout_ns);
> > +               args->error = fence->error;
> > +               dma_fence_put(fence);
> > +       }
> > +       else
> > +               args->error = 0;
> > +
> > +       return err;
> > +}
> > +
> > +static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_wait *args = data;
> > +
> > +       if (!(args->op & (LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE)))
> > +           return -EINVAL;
> > +
> > +       return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
> > +}
> > +
> > +static int lima_ioctl_ctx(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_ctx *args = data;
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +
> > +       if (args->op == LIMA_CTX_OP_CREATE)
> > +               return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
> > +       else if (args->op == LIMA_CTX_OP_FREE)
> > +               return lima_ctx_free(&priv->ctx_mgr, args->id);
> > +
> > +       return -EINVAL;
> > +}
> > +
> > +static int lima_ioctl_gem_mod(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > +       struct drm_lima_gem_mod *args = data;
> > +
> > +       if (args->op == LIMA_GEM_MOD_OP_GET)
> > +               return lima_gem_get_modifier(file, args->handle, &args->modifier);
> > +       else if (args->op == LIMA_GEM_MOD_OP_SET)
> > +               return lima_gem_set_modifier(file, args->handle, args->modifier);
> > +
> > +       return -EINVAL;
> > +}
> > +
> > +static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
> > +{
> > +       int err;
> > +       struct lima_drm_priv *priv;
> > +       struct lima_device *ldev = to_lima_dev(dev);
> > +
> > +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
> > +       if (!priv)
> > +               return -ENOMEM;
> > +
> > +       priv->vm = lima_vm_create(ldev);
> > +       if (!priv->vm) {
> > +               err = -ENOMEM;
> > +               goto err_out0;
> > +       }
> > +
> > +       lima_ctx_mgr_init(&priv->ctx_mgr);
> > +
> > +       file->driver_priv = priv;
> > +       return 0;
> > +
> > +err_out0:
> > +       kfree(priv);
> > +       return err;
> > +}
> > +
> > +static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
> > +{
> > +       struct lima_drm_priv *priv = file->driver_priv;
> > +
> > +       lima_ctx_mgr_fini(&priv->ctx_mgr);
> > +       lima_vm_put(priv->vm);
> > +       kfree(priv);
> > +}
> > +
> > +static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
> > +       DRM_IOCTL_DEF_DRV(LIMA_INFO, lima_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_VA, lima_ioctl_gem_va, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_WAIT_FENCE, lima_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_CTX, lima_ioctl_ctx, DRM_AUTH|DRM_RENDER_ALLOW),
> > +       DRM_IOCTL_DEF_DRV(LIMA_GEM_MOD, lima_ioctl_gem_mod, DRM_AUTH|DRM_RENDER_ALLOW),
> > +};
> > +
> > +static const struct file_operations lima_drm_driver_fops = {
> > +       .owner              = THIS_MODULE,
> > +       .open               = drm_open,
> > +       .release            = drm_release,
> > +       .unlocked_ioctl     = drm_ioctl,
> > +#ifdef CONFIG_COMPAT
> > +       .compat_ioctl       = drm_compat_ioctl,
> > +#endif
> > +       .mmap               = lima_gem_mmap,
> > +};
> > +
> > +static struct drm_driver lima_drm_driver = {
> > +       .driver_features    = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME,
> > +       .open               = lima_drm_driver_open,
> > +       .postclose          = lima_drm_driver_postclose,
> > +       .ioctls             = lima_drm_driver_ioctls,
> > +       .num_ioctls         = ARRAY_SIZE(lima_drm_driver_ioctls),
> > +       .fops               = &lima_drm_driver_fops,
> > +       .gem_free_object_unlocked = lima_gem_free_object,
> > +       .gem_open_object    = lima_gem_object_open,
> > +       .gem_close_object   = lima_gem_object_close,
> > +       .name               = "lima",
> > +       .desc               = "lima DRM",
> > +       .date               = "20170325",
>
> Perhaps this should be updated? TBH, I don't know why this is even useful.
>
> > +       .major              = 1,
> > +       .minor              = 0,
> > +       .patchlevel         = 0,
> > +
> > +       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> > +       .gem_prime_import   = drm_gem_prime_import,
> > +       .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
> > +       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> > +       .gem_prime_export   = drm_gem_prime_export,
>
> import and export don't have to be set if you use the defaults.
>
> > +       .gem_prime_res_obj  = lima_gem_prime_res_obj,
> > +       .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
> > +       .gem_prime_vmap = lima_gem_prime_vmap,
> > +       .gem_prime_vunmap = lima_gem_prime_vunmap,
> > +       .gem_prime_mmap = lima_gem_prime_mmap,
> > +};
> > +
>
> > +int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns)
> > +{
> > +       bool write = op & LIMA_GEM_WAIT_WRITE;
> > +       struct drm_gem_object *obj;
> > +       struct lima_bo *bo;
> > +       signed long ret;
> > +       unsigned long timeout;
> > +
> > +       obj = drm_gem_object_lookup(file, handle);
> > +       if (!obj)
> > +               return -ENOENT;
> > +
> > +       bo = to_lima_bo(obj);
> > +
> > +       timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;
> > +
> > +       ret = lima_bo_reserve(bo, true);
> > +       if (ret)
> > +               goto out;
> > +
> > +       /* must use long for the result check: on a 64-bit arch an int
> > +        * would overflow for a large timeout and go negative
> > +        */
> > +       ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, write, true, timeout);
> > +       if (ret == 0)
> > +               ret = timeout ? -ETIMEDOUT : -EBUSY;
> > +       else if (ret > 0)
> > +               ret = 0;
>
> There's a helper I added for all this that should land in 5.1.
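
(If that helper is drm_gem_reservation_object_wait(), the body above
would reduce to roughly this sketch; note the helper does its own
lookup and returns -ETIME rather than -ETIMEDOUT/-EBUSY on timeout:)

	long ret;

	ret = drm_gem_reservation_object_wait(file, handle,
					      op & LIMA_GEM_WAIT_WRITE,
					      lima_timeout_to_jiffies(timeout_ns));
	return ret;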
>
> > +
> > +       lima_bo_unreserve(bo);
> > +out:
> > +       drm_gem_object_put_unlocked(obj);
> > +       return ret;
> > +}
> > +
>
> > +static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
> > +{
> > +       struct lima_device *dev = ip->dev;
> > +       int timeout;
> > +
> > +       if (!ip->data.async_reset)
> > +               return 0;
> > +
> > +       for (timeout = 1000; timeout > 0; timeout--) {
> > +               if (gp_read(INT_RAWSTAT) & LIMA_GP_IRQ_RESET_COMPLETED)
> > +                       break;
>
> Use readl_poll_timeout instead of writing your own. At least add a
> udelay to the loop so the timing is fixed and not dependent on how
> fast the code can run.
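
A sketch of the same wait via linux/iopoll.h, assuming the
LIMA_GP_INT_RAWSTAT offset behind gp_read() and an arbitrary 100ms
budget (readl_poll_timeout_atomic() would be the variant to use if
this can run in atomic context):

	u32 stat;
	int err;

	/* poll every 10us, give up after 100ms */
	err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, stat,
				 stat & LIMA_GP_IRQ_RESET_COMPLETED,
				 10, 100000);
	if (err) {
		dev_err(dev->dev, "gp soft reset time out\n");
		return -ETIMEDOUT;
	}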
>
> > +       }
> > +       if (!timeout) {
> > +               dev_err(dev->dev, "gp soft reset time out\n");
> > +               return -ETIMEDOUT;
> > +       }
> > +
> > +       gp_write(INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
> > +       gp_write(INT_MASK, LIMA_GP_IRQ_MASK_USED);
> > +
> > +       ip->data.async_reset = false;
> > +       return 0;
> > +}
>
> > diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
> > new file mode 100644
> > index 000000000000..e7cdec720e5d
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_l2_cache.c
> > @@ -0,0 +1,79 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_l2_cache.h"
> > +#include "lima_regs.h"
> > +
> > +#define l2_cache_write(reg, data) writel(data, ip->iomem + LIMA_L2_CACHE_##reg)
> > +#define l2_cache_read(reg) readl(ip->iomem + LIMA_L2_CACHE_##reg)
> > +
> > +static int lima_l2_cache_wait_idle(struct lima_ip *ip)
> > +{
> > +       int timeout;
> > +       struct lima_device *dev = ip->dev;
> > +
> > +       for (timeout = 100000; timeout > 0; timeout--) {
> > +               if (!(l2_cache_read(STATUS) & LIMA_L2_CACHE_STATUS_COMMAND_BUSY))
> > +                       break;
>
> Use readl_poll_timeout or variant.
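
(This loop runs under ip->data.lock, a spinlock, so it would need the
atomic variant, e.g.:)

	u32 stat;
	int err;

	/* busy-poll without sleeping since the caller holds a spinlock */
	err = readl_poll_timeout_atomic(ip->iomem + LIMA_L2_CACHE_STATUS, stat,
					!(stat & LIMA_L2_CACHE_STATUS_COMMAND_BUSY),
					0, 1000);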
>
> > +       }
> > +       if (!timeout) {
> > +               dev_err(dev->dev, "l2 cache wait command timeout\n");
> > +               return -ETIMEDOUT;
> > +       }
> > +       return 0;
> > +}
> > +
> > +int lima_l2_cache_flush(struct lima_ip *ip)
> > +{
> > +       int ret;
> > +
> > +       spin_lock(&ip->data.lock);
> > +       l2_cache_write(COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
> > +       ret = lima_l2_cache_wait_idle(ip);
> > +       spin_unlock(&ip->data.lock);
> > +       return ret;
> > +}
> > +
> > +int lima_l2_cache_init(struct lima_ip *ip)
> > +{
> > +       int i, err;
> > +       u32 size;
> > +       struct lima_device *dev = ip->dev;
> > +
> > +       /* l2_cache2 only exists when one of PP4-7 is present */
> > +       if (ip->id == lima_ip_l2_cache2) {
> > +               for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
> > +                       if (dev->ip[i].present)
> > +                               break;
> > +               }
> > +               if (i > lima_ip_pp7)
> > +                       return -ENODEV;
> > +       }
> > +
> > +       spin_lock_init(&ip->data.lock);
> > +
> > +       size = l2_cache_read(SIZE);
> > +       dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
> > +                1 << (((size >> 16) & 0xff) - 10),
> > +                1 << ((size >> 8) & 0xff),
> > +                1 << (size & 0xff),
> > +                1 << ((size >> 24) & 0xff));
> > +
> > +       err = lima_l2_cache_flush(ip);
> > +       if (err)
> > +               return err;
> > +
> > +       l2_cache_write(ENABLE, LIMA_L2_CACHE_ENABLE_ACCESS | LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
> > +       l2_cache_write(MAX_READS, 0x1c);
> > +
> > +       return 0;
> > +}
> > +
> > +void lima_l2_cache_fini(struct lima_ip *ip)
> > +{
> > +
> > +}
> > diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
> > new file mode 100644
> > index 000000000000..2ff91eafefbe
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_l2_cache.h
> > @@ -0,0 +1,14 @@
> > +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#ifndef __LIMA_L2_CACHE_H__
> > +#define __LIMA_L2_CACHE_H__
> > +
> > +struct lima_ip;
> > +
> > +int lima_l2_cache_init(struct lima_ip *ip);
> > +void lima_l2_cache_fini(struct lima_ip *ip);
> > +
> > +int lima_l2_cache_flush(struct lima_ip *ip);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
> > new file mode 100644
> > index 000000000000..234fb90a4285
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_mmu.c
> > @@ -0,0 +1,135 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/interrupt.h>
> > +#include <linux/io.h>
> > +#include <linux/device.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_mmu.h"
> > +#include "lima_vm.h"
> > +#include "lima_object.h"
> > +#include "lima_regs.h"
> > +
> > +#define mmu_write(reg, data) writel(data, ip->iomem + LIMA_MMU_##reg)
> > +#define mmu_read(reg) readl(ip->iomem + LIMA_MMU_##reg)
> > +
> > +#define lima_mmu_send_command(command, condition)           \
> > +({                                                          \
> > +       int __timeout, __ret = 0;                            \
> > +                                                            \
> > +       mmu_write(COMMAND, command);                         \
> > +       for (__timeout = 1000; __timeout > 0; __timeout--) { \
> > +               if (condition)                               \
> > +                       break;                               \
> > +       }                                                    \
> > +       if (!__timeout) {                                    \
> > +               dev_err(dev->dev, "mmu command %x timeout\n", command); \
> > +               __ret = -ETIMEDOUT;                          \
> > +       }                                                    \
> > +       __ret;                                               \
> > +})
> > +
> > +static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
> > +{
> > +       struct lima_ip *ip = data;
> > +       struct lima_device *dev = ip->dev;
> > +       u32 status = mmu_read(INT_STATUS);
> > +       struct lima_sched_pipe *pipe;
> > +
> > +       /* for shared irq case */
> > +       if (!status)
>
> Can status have masked irq's? If so, you should be masking out the
> disabled irq bits.
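
(For example, assuming the INT_MASK register reads back the enabled
bits:)

	/* only act on interrupt bits that are actually enabled */
	u32 status = mmu_read(INT_STATUS) & mmu_read(INT_MASK);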
>
> > +               return IRQ_NONE;
> > +
> > +       if (status & LIMA_MMU_INT_PAGE_FAULT) {
> > +               u32 fault = mmu_read(PAGE_FAULT_ADDR);
> > +               dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
> > +                       fault, LIMA_MMU_STATUS_BUS_ID(status),
> > +                       status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
> > +                       lima_ip_name(ip));
> > +       }
> > +
> > +       if (status & LIMA_MMU_INT_READ_BUS_ERROR) {
> > +               dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
> > +       }
> > +
> > +       /* mask all interrupts before resume */
> > +       mmu_write(INT_MASK, 0);
> > +       mmu_write(INT_CLEAR, status);
> > +
> > +       pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
> > +       lima_sched_pipe_mmu_error(pipe);
> > +
> > +       return IRQ_HANDLED;
> > +}
>
>
> > +
> > +unsigned long lima_timeout_to_jiffies(u64 timeout_ns)
>
> Create a common helper instead of copy-n-pasting this from other
> drivers (etnaviv).
There's drm_timeout_abs_to_jiffies, but it's not exported.

>
> > +{
> > +       unsigned long timeout_jiffies;
> > +       ktime_t timeout;
> > +
> > +       /* clamp timeout if it's too large */
> > +       if (((s64)timeout_ns) < 0)
> > +               return MAX_SCHEDULE_TIMEOUT;
> > +
> > +       timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
> > +       if (ktime_to_ns(timeout) < 0)
> > +               return 0;
> > +
> > +       timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
> > +       /* clamp timeout to avoid unsigned -> signed overflow */
> > +       if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
> > +               return MAX_SCHEDULE_TIMEOUT;
> > +
> > +       return timeout_jiffies;
> > +}
> > +
> > +void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
> > +{
> > +       if (pipe->error)
> > +               schedule_work(&pipe->error_work);
> > +       else {
> > +               struct lima_sched_task *task = pipe->current_task;
> > +
> > +               pipe->task_fini(pipe);
> > +               dma_fence_signal(task->fence);
> > +       }
> > +}
>
> > diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
> > new file mode 100644
> > index 000000000000..a264f3ae83fe
> > --- /dev/null
> > +++ b/drivers/gpu/drm/lima/lima_vm.c
> > @@ -0,0 +1,354 @@
> > +// SPDX-License-Identifier: GPL-2.0 OR MIT
> > +/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
> > +
> > +#include <linux/slab.h>
> > +#include <linux/dma-mapping.h>
> > +#include <linux/interval_tree_generic.h>
> > +
> > +#include "lima_device.h"
> > +#include "lima_vm.h"
> > +#include "lima_object.h"
> > +#include "lima_regs.h"
> > +
> > +struct lima_bo_va_mapping {
> > +       struct list_head list;
> > +       struct rb_node rb;
> > +       uint32_t start;
> > +       uint32_t last;
> > +       uint32_t __subtree_last;
> > +};
> > +
> > +struct lima_bo_va {
> > +       struct list_head list;
> > +       unsigned ref_count;
> > +
> > +       struct list_head mapping;
> > +
> > +       struct lima_vm *vm;
> > +};
> > +
> > +#define LIMA_VM_PD_SHIFT 22
> > +#define LIMA_VM_PT_SHIFT 12
> > +#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
> > +#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
> > +
> > +#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
> > +#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
> > +
> > +#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
> > +#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
> > +#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
> > +#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
> > +
> > +#define START(node) ((node)->start)
> > +#define LAST(node) ((node)->last)
> > +
> > +INTERVAL_TREE_DEFINE(struct lima_bo_va_mapping, rb, uint32_t, __subtree_last,
> > +                    START, LAST, static, lima_vm_it)
> > +
> > +#undef START
> > +#undef LAST
> > +
> > +static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
> > +{
> > +       u32 addr;
> > +
> > +       for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
> > +               u32 pbe = LIMA_PBE(addr);
> > +               u32 bte = LIMA_BTE(addr);
> > +               u32 *bt;
> > +
> > +               bt = lima_bo_kmap(vm->bts[pbe]);
> > +               bt[bte] = 0;
> > +       }
> > +}
> > +
> > +static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
> > +                                 u32 start, u32 end)
> > +{
> > +       u64 addr;
> > +       int err, i = 0;
> > +
> > +       for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
> > +               u32 pbe = LIMA_PBE(addr);
> > +               u32 bte = LIMA_BTE(addr);
> > +               u32 *bt;
> > +
> > +               if (vm->bts[pbe])
> > +                       bt = lima_bo_kmap(vm->bts[pbe]);
> > +               else {
> > +                       struct lima_bo *bt_bo;
> > +                       dma_addr_t *pts;
> > +                       u32 *pd;
> > +                       int j;
> > +
> > +                       bt_bo = lima_bo_create(
> > +                               vm->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
> > +                               0, ttm_bo_type_kernel,
> > +                               NULL, vm->pd->tbo.resv);
>
> I don't think using BOs for page tables buys you anything. You could
> just use the kernel DMA API directly. See io-pgtable-arm-v7s.c for
> inspiration. For panfrost, it's standard ARM format page tables so we
> can just use the io-pgtable library.
Right, v2 will use DMA API directly.
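
For reference, a minimal sketch of a DMA API allocation replacing the
BO (dma_alloc_wc() is just one option; lima_vm would then keep the
kernel pointer and dma address instead of a lima_bo):

	dma_addr_t bt_dma;
	u32 *bt;

	/* one block of page tables, CPU-visible and GPU-addressable */
	bt = dma_alloc_wc(vm->dev->dev,
			  LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
			  &bt_dma, GFP_KERNEL | __GFP_ZERO);
	if (!bt)
		return -ENOMEM;

	/* PD entries then point at bt_dma + j * LIMA_PAGE_SIZE */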

>
> > +                       if (IS_ERR(bt_bo)) {
> > +                               err = PTR_ERR(bt_bo);
> > +                               goto err_out;
> > +                       }
> > +
> > +                       bt = lima_bo_kmap(bt_bo);
> > +                       if (IS_ERR(bt)) {
> > +                               lima_bo_unref(bt_bo);
> > +                               err = PTR_ERR(bt);
> > +                               goto err_out;
> > +                       }
> > +                       memset(bt, 0, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT);
> > +
> > +                       vm->bts[pbe] = bt_bo;
> > +                       pd = lima_bo_kmap(vm->pd);
> > +                       pd += pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT;
> > +                       pts = lima_bo_get_pages(bt_bo);
> > +                       for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++)
> > +                               *pd++ = *pts++ | LIMA_VM_FLAG_PRESENT;
> > +               }
> > +
> > +               bt[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
> > +       }
> > +
> > +       return 0;
> > +
> > +err_out:
> > +       if (addr != start)
> > +               lima_vm_unmap_page_table(vm, start, addr - 1);
> > +       return err;
> > +}
> > +
> > +static struct lima_bo_va *
> > +lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
> > +{
> > +       struct lima_bo_va *bo_va, *ret = NULL;
> > +
> > +       list_for_each_entry(bo_va, &bo->va, list) {
> > +               if (bo_va->vm == vm) {
> > +                       ret = bo_va;
> > +                       break;
> > +               }
> > +       }
> > +
> > +       return ret;
> > +}
> > +
> > +int lima_vm_bo_map(struct lima_vm *vm, struct lima_bo *bo, u32 start)
> > +{
> > +       int err;
> > +       struct lima_bo_va_mapping *it, *mapping;
> > +       u32 end = start + bo->gem.size - 1;
> > +       dma_addr_t *pages_dma = lima_bo_get_pages(bo);
> > +       struct lima_bo_va *bo_va;
> > +
> > +       it = lima_vm_it_iter_first(&vm->va, start, end);
> > +       if (it) {
> > +               dev_dbg(bo->gem.dev->dev, "lima vm map va overlap %x-%x %x-%x\n",
> > +                       start, end, it->start, it->last);
> > +               return -EINVAL;
> > +       }
> > +
> > +       mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
> > +       if (!mapping)
> > +               return -ENOMEM;
> > +       mapping->start = start;
> > +       mapping->last = end;
>
> Why don't you use the drm_mm_XX APIs instead of writing your own?
Use it in v2.
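
(For kernel-assigned addresses, a drm_mm based sketch could look like
this, assuming lima_vm grows a struct drm_mm covering the GPU VA range
plus a lock protecting it; the node field on bo_va is hypothetical:)

	struct drm_mm_node *node = &bo_va->node;
	int err;

	mutex_lock(&vm->lock);
	err = drm_mm_insert_node(&vm->mm, node, bo->gem.size);
	mutex_unlock(&vm->lock);
	if (err)
		return err;

	/* node->start is the GPU VA picked by the allocator */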

>
> > +
> > +       err = lima_vm_map_page_table(vm, pages_dma, start, end);
> > +       if (err) {
> > +               kfree(mapping);
> > +               return err;
> > +       }
> > +
> > +       lima_vm_it_insert(mapping, &vm->va);
> > +
> > +       bo_va = lima_vm_bo_find(vm, bo);
> > +       list_add_tail(&mapping->list, &bo_va->mapping);
> > +
> > +       return 0;
> > +}
kernel test robot via dri-devel Feb. 13, 2019, 1:13 a.m. UTC | #8
On Wed, Feb 13, 2019 at 4:05 AM Rob Herring <robh@kernel.org> wrote:
>
> On Tue, Feb 12, 2019 at 10:24 AM Alex Deucher <alexdeucher@gmail.com> wrote:
> >
> > On Tue, Feb 12, 2019 at 10:53 AM Rob Herring via dri-devel
> > <dri-devel@lists.freedesktop.org> wrote:
> > >
> > > On Wed, Feb 6, 2019 at 7:16 AM Qiang Yu <yuq825@gmail.com> wrote:
> > > >
> > > > From: Lima Project Developers <lima@lists.freedesktop.org>
>
> [...]
>
> > > > +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> > > > +{
> > > > +       struct drm_lima_gem_va *args = data;
> > > > +
> > > > +       switch (args->op) {
> > > > +       case LIMA_VA_OP_MAP:
> > > > +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> > > > +       case LIMA_VA_OP_UNMAP:
> > > > +               return lima_gem_va_unmap(file, args->handle, args->va);
> > >
> > > These are mapping to GPU VA. Why not do that on GEM object creation or
> > > import or when the objects are submitted with cmd queue as other
> > > drivers do?
> > >
> > > To put it another way, These ioctls look different than what other
> > > drivers do. Why do you need to do things differently? My understanding
> > > is best practice is to map and return the GPU offset when the GEM
> > > object is created. This is what v3d does. I think Intel is moving to
> > > that. And panfrost will do that.
> >
> > I think it would be a good idea to look at the amdgpu driver.  This
> > driver is heavily modeled after it.  Basically the GEM VA ioctl allows
> > userspace to manage per process (per fd really) virtual addresses.
>
> Why do you want userspace to manage assigning VAs versus the kernel to
> do so? Exposing that detail to userspace means the driver must support
> a per process address space. Letting the kernel assign addresses means
> it can either be a single address space or be a per process address
> space. It seems to me more flexible to allow the kernel driver to
> evolve without that ABI.
>
> With any new driver in the kernel, the question is always which
> existing one is the best model to follow. I don't think Intel, AMD or
> Nouveau are good examples to follow because they have a lot of history
> and legacy, are both GPU and DC, and have separate graphics memory
> (except Intel I guess). The GPUs in ARM land have none of those
> really. Looking thru freedreno, etnaviv, and v3d mostly, I see they
> all have similar user ABIs. But they are all different based on what
> driver they copied and how they've evolved. I know it's a big can of
> worms, but it would be nice to have some alignment of ABIs. I know the
> reasons why there isn't, but it's frustrating that only 11 out of 60K
> IGT tests will run. I don't think a common ABI matters much for the big 3,
> but in the ARM zoo I think it does. At least if the interfaces are
> kept similar, then having common code shared among the embedded GPUs
> would be easier and writing some IGT shim for each driver would be
> easier.
I admit userspace VA management is not a good fit and is overkill for
a GPU like the Mali4xx, which does not have complicated SW/HW like an
AMD GPU. It needs some extra effort to make it work but brings no
visible benefit to the lima driver.

So in v2 I now do the VA map at BO creation. You may also find the v2
driver is closer to the other ARM-world GPU drivers, as it uses the
GEM+shmem way instead of TTM:
https://gitlab.freedesktop.org/lima/linux/commits/lima-5.0-rc6

But since the change is huge, I'll send it here later this week for
review, after more testing and after addressing the rest of your
review comments.

Thanks,
Qiang

>
>
> Rob
kernel test robot via dri-devel Feb. 13, 2019, 1:46 a.m. UTC | #9
On Tue, Feb 12, 2019 at 6:56 PM Qiang Yu <yuq825@gmail.com> wrote:
>
> On Tue, Feb 12, 2019 at 11:47 PM Rob Herring <robh@kernel.org> wrote:
> >
> > On Wed, Feb 6, 2019 at 7:16 AM Qiang Yu <yuq825@gmail.com> wrote:
> > >
> > > From: Lima Project Developers <lima@lists.freedesktop.org>
> >
> > This should be a person (you).
> >
> > > Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
> > > Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
> > > Signed-off-by: Heiko Stuebner <heiko@sntech.de>
> > > Signed-off-by: Marek Vasut <marex@denx.de>
> > > Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
> > > Signed-off-by: Qiang Yu <yuq825@gmail.com>
> >
> > Being the submitter, your S-o-b should be last.
> >
> > > Signed-off-by: Simon Shields <simon@lineageos.org>
> > > Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
> > > ---

> > > +
> > > +unsigned long lima_timeout_to_jiffies(u64 timeout_ns)
> >
> > Create a common helper instead of copy-n-pasting this from other
> > drivers (etnaviv).
> There's drm_timeout_abs_to_jiffies, but it's not exported.

Then let's export it. We don't work around kernel APIs or lack of. We fix them.
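
A sketch of that export, assuming the helper stays where drm_syncobj.c
currently defines it:

	/* drivers/gpu/drm/drm_syncobj.c: drop the static, then */
	EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);

	/* plus a prototype in a shared header, e.g. include/drm/drm_utils.h */
	signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec);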


Rob
kernel test robot via dri-devel Feb. 14, 2019, 2:52 a.m. UTC | #10
I typed this up yesterday, but it looks like I never hit send.


On Tue, Feb 12, 2019 at 3:05 PM Rob Herring <robh@kernel.org> wrote:
>
> On Tue, Feb 12, 2019 at 10:24 AM Alex Deucher <alexdeucher@gmail.com> wrote:
> >
> > On Tue, Feb 12, 2019 at 10:53 AM Rob Herring via dri-devel
> > <dri-devel@lists.freedesktop.org> wrote:
> > >
> > > On Wed, Feb 6, 2019 at 7:16 AM Qiang Yu <yuq825@gmail.com> wrote:
> > > >
> > > > From: Lima Project Developers <lima@lists.freedesktop.org>
>
> [...]
>
> > > > +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> > > > +{
> > > > +       struct drm_lima_gem_va *args = data;
> > > > +
> > > > +       switch (args->op) {
> > > > +       case LIMA_VA_OP_MAP:
> > > > +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> > > > +       case LIMA_VA_OP_UNMAP:
> > > > +               return lima_gem_va_unmap(file, args->handle, args->va);
> > >
> > > These are mapping to GPU VA. Why not do that on GEM object creation or
> > > import or when the objects are submitted with cmd queue as other
> > > drivers do?
> > >
> > > To put it another way, These ioctls look different than what other
> > > drivers do. Why do you need to do things differently? My understanding
> > > is best practice is to map and return the GPU offset when the GEM
> > > object is created. This is what v3d does. I think Intel is moving to
> > > that. And panfrost will do that.
> >
> > I think it would be a good idea to look at the amdgpu driver.  This
> > driver is heavily modeled after it.  Basically the GEM VA ioctl allows
> > userspace to manage per process (per fd really) virtual addresses.
>
> Why do you want userspace to manage assigning VAs versus the kernel to
> do so? Exposing that detail to userspace means the driver must support
> a per process address space. Letting the kernel assign addresses means
> it can either be a single address space or be a per process address
> space. It seems to me more flexible to allow the kernel driver to
> evolve without that ABI.

Having it in userspace provides a lot more flexibility and makes it
easier to support things like unified address space between CPU and
GPU. I guess it depends on the hw as to what is the right choice.

>
> With any new driver in the kernel, the question is always which
> existing one is the best model to follow. I don't think Intel, AMD or
> Nouveau are good examples to follow because they have a lot of history
> and legacy, are both GPU and DC, and have separate graphics memory
> (except Intel I guess). The GPUs in ARM land have none of those
> really. Looking thru freedreno, etnaviv, and v3d mostly, I see they
> all have similar user ABIs. But they are all different based on what
> driver they copied and how they've evolved. I know it's a big can of
> worms, but it would be nice to have some alignment of ABIs. I know the
> reasons why there isn't, but it's frustrating that only 11 out of 60K
> IGT tests will run. I don't think a common ABI matters much for the big 3,
> but in the ARM zoo I think it does. At least if the interfaces are
> kept similar, then having common code shared among the embedded GPUs
> would be easier and writing some IGT shim for each driver would be
> easier.

It also depends on what you are familiar with.  Qiang is familiar with
amdgpu so it makes sense to use that as a model for his own projects.
I don't know that similarity between drivers for hw that is mostly
part of ARM SoCs is any more important than the applicability to the
hw or what the maintainer likes.  I mean you can use amdgpu or nouveau
on ARM too.

Alex
kernel test robot via dri-devel Feb. 14, 2019, 9:12 a.m. UTC | #11
Am 14.02.19 um 03:52 schrieb Alex Deucher via dri-devel:
> [SNIP]
>>>>> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
>>>>> +{
>>>>> +       struct drm_lima_gem_va *args = data;
>>>>> +
>>>>> +       switch (args->op) {
>>>>> +       case LIMA_VA_OP_MAP:
>>>>> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
>>>>> +       case LIMA_VA_OP_UNMAP:
>>>>> +               return lima_gem_va_unmap(file, args->handle, args->va);
>>>> These are mapping to GPU VA. Why not do that on GEM object creation or
>>>> import or when the objects are submitted with cmd queue as other
>>>> drivers do?
>>>>
>>>> To put it another way, These ioctls look different than what other
>>>> drivers do. Why do you need to do things differently? My understanding
>>>> is best practice is to map and return the GPU offset when the GEM
>>>> object is created. This is what v3d does. I think Intel is moving to
>>>> that. And panfrost will do that.
>>> I think it would be a good idea to look at the amdgpu driver.  This
>>> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
>>> userspace to manage per process (per fd really) virtual addresses.
>> Why do you want userspace to manage assigning VAs versus the kernel to
>> do so? Exposing that detail to userspace means the driver must support
>> a per process address space. Letting the kernel assign addresses means
>> it can either be a single address space or be a per process address
>> space. It seems to me more flexible to allow the kernel driver to
>> evolve without that ABI.
> Having it in userspace provides a lot more flexibility and makes it
> easier to support things like unified address space between CPU and
> GPU. I guess it depends on the hw as to what is the right choice.

To summarize, we actually tried this approach with radeon and it
turned out to be a really bad mistake.

To implement features like partially resident textures and shared
virtual address space you absolutely need userspace to be in charge of
allocating virtual addresses.

Regards,
Christian.
Daniel Vetter Feb. 14, 2019, 10:15 a.m. UTC | #12
On Thu, Feb 14, 2019 at 10:12 AM Christian König via dri-devel
<dri-devel@lists.freedesktop.org> wrote:
>
> Am 14.02.19 um 03:52 schrieb Alex Deucher via dri-devel:
> > [SNIP]
> >>>>> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> >>>>> +{
> >>>>> +       struct drm_lima_gem_va *args = data;
> >>>>> +
> >>>>> +       switch (args->op) {
> >>>>> +       case LIMA_VA_OP_MAP:
> >>>>> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> >>>>> +       case LIMA_VA_OP_UNMAP:
> >>>>> +               return lima_gem_va_unmap(file, args->handle, args->va);
> >>>> These are mapping to GPU VA. Why not do that on GEM object creation or
> >>>> import or when the objects are submitted with cmd queue as other
> >>>> drivers do?
> >>>>
> >>>> To put it another way, These ioctls look different than what other
> >>>> drivers do. Why do you need to do things differently? My understanding
> >>>> is best practice is to map and return the GPU offset when the GEM
> >>>> object is created. This is what v3d does. I think Intel is moving to
> >>>> that. And panfrost will do that.
> >>> I think it would be a good idea to look at the amdgpu driver.  This
> >>> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
> >>> userspace to manage per process (per fd really) virtual addresses.
> >> Why do you want userspace to manage assigning VAs versus the kernel to
> >> do so? Exposing that detail to userspace means the driver must support
> >> a per process address space. Letting the kernel assign addresses means
> >> it can either be a single address space or be a per process address
> >> space. It seems to me more flexible to allow the kernel driver to
> >> evolve without that ABI.
> > Having it in userspace provides a lot more flexibility and makes it
> > easier to support things like unified address space between CPU and
> > GPU. I guess it depends on the hw as to what is the right choice.
>
> To summarize, we actually tried this approach with radeon and it
> turned out to be a really bad mistake.
>
> To implement features like partially resident textures and shared
> virtual address space you absolutely need userspace to be in charge of
> allocating virtual addresses.

Yeah same here, as soon as you have per-process address spaces you
want your userspace to control where buffers are placed. All new intel
drivers use softpin to control the layout fully (anv and iris). Of
course if you also have hw without per-process virtual address space
on the gpu in some form, then the kernel needs to assign addresses,
which means lots of relocs. i965_dri.so still works like that, even
with the rewritten buffer/batch manager. But I'd really only do that
if you can't avoid it.
-Daniel
Dave Airlie March 14, 2019, 8:44 p.m. UTC | #13
On Thu, 14 Feb 2019 at 19:12, Christian König via dri-devel
<dri-devel@lists.freedesktop.org> wrote:
>
> Am 14.02.19 um 03:52 schrieb Alex Deucher via dri-devel:
> > [SNIP]
> >>>>> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> >>>>> +{
> >>>>> +       struct drm_lima_gem_va *args = data;
> >>>>> +
> >>>>> +       switch (args->op) {
> >>>>> +       case LIMA_VA_OP_MAP:
> >>>>> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> >>>>> +       case LIMA_VA_OP_UNMAP:
> >>>>> +               return lima_gem_va_unmap(file, args->handle, args->va);
> >>>> These are mapping to GPU VA. Why not do that on GEM object creation or
> >>>> import or when the objects are submitted with cmd queue as other
> >>>> drivers do?
> >>>>
> >>>> To put it another way, These ioctls look different than what other
> >>>> drivers do. Why do you need to do things differently? My understanding
> >>>> is best practice is to map and return the GPU offset when the GEM
> >>>> object is created. This is what v3d does. I think Intel is moving to
> >>>> that. And panfrost will do that.
> >>> I think it would be a good idea to look at the amdgpu driver.  This
> >>> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
> >>> userspace to manage per process (per fd really) virtual addresses.
> >> Why do you want userspace to manage assigning VAs versus the kernel to
> >> do so? Exposing that detail to userspace means the driver must support
> >> a per process address space. Letting the kernel assign addresses means
> >> it can either be a single address space or be a per process address
> >> space. It seems to me more flexible to allow the kernel driver to
> >> evolve without that ABI.
> > Having it in userspace provides a lot more flexibility and makes it
> > easier to support things like unified address space between CPU and
> > GPU. I guess it depends on the hw as to what is the right choice.
>
> To summarize, we actually tried this approach with radeon and it
> turned out to be a really bad mistake.
>
> To implement features like partially resident textures and shared
> virtual address space you absolutely need userspace to be in charge of
> allocating virtual addresses.
>

I think for lima not having this is fine, but for panfrost it really
should have it.

If you can implement vulkan you probably want this, nouveau hasn't a
vulkan driver because of exactly this problem in their uapi, so maybe
adjust panfrost to do user-space managed vma.

Dave.
Rob Herring (Arm) March 14, 2019, 9:44 p.m. UTC | #14
On Thu, Mar 14, 2019 at 3:45 PM Dave Airlie <airlied@gmail.com> wrote:
>
> On Thu, 14 Feb 2019 at 19:12, Christian König via dri-devel
> <dri-devel@lists.freedesktop.org> wrote:
> >
> > Am 14.02.19 um 03:52 schrieb Alex Deucher via dri-devel:
> > > [SNIP]
> > >>>>> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
> > >>>>> +{
> > >>>>> +       struct drm_lima_gem_va *args = data;
> > >>>>> +
> > >>>>> +       switch (args->op) {
> > >>>>> +       case LIMA_VA_OP_MAP:
> > >>>>> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
> > >>>>> +       case LIMA_VA_OP_UNMAP:
> > >>>>> +               return lima_gem_va_unmap(file, args->handle, args->va);
> > >>>> These are mapping to GPU VA. Why not do that on GEM object creation or
> > >>>> import or when the objects are submitted with cmd queue as other
> > >>>> drivers do?
> > >>>>
> > >>>> To put it another way, These ioctls look different than what other
> > >>>> drivers do. Why do you need to do things differently? My understanding
> > >>>> is best practice is to map and return the GPU offset when the GEM
> > >>>> object is created. This is what v3d does. I think Intel is moving to
> > >>>> that. And panfrost will do that.
> > >>> I think it would be a good idea to look at the amdgpu driver.  This
> > >>> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
> > >>> userspace to manage per process (per fd really) virtual addresses.
> > >> Why do you want userspace to manage assigning VAs versus the kernel to
> > >> do so? Exposing that detail to userspace means the driver must support
> > >> a per process address space. Letting the kernel assign addresses means
> > >> it can either be a single address space or be a per process address
> > >> space. It seems to me more flexible to allow the kernel driver to
> > >> evolve without that ABI.
> > > Having it in userspace provides a lot more flexibility and makes it
> > > easier to support things like unified address space between CPU and
> > > GPU. I guess it depends on the hw as to what is the right choice.
> >
> > To summarize, we actually tried this approach with radeon and it
> > turned out to be a really bad mistake.
> >
> > To implement features like partially resident textures and shared
> > virtual address space you absolutely need userspace to be in charge of
> > allocating virtual addresses.
> >
>
> I think for lima not having this is fine, but for panfrost it really
> should have it.
>
> If you can implement vulkan you probably want this, nouveau hasn't a
> vulkan driver because of exactly this problem in their uapi, so maybe
> adjust panfrost to do user-space managed vma.

Wouldn't this just require an allocation flag to not map the BO up
front and then new ioctl's like above to map and unmap at specified
VAs? Seems like we could add that when we get there.
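
Something like this hypothetical uapi sketch (all names invented):

	#define PANFROST_BO_NOMAP	(1 << 0)	/* create without a GPU VA */

	struct drm_panfrost_gem_va {
		__u32 handle;
		__u32 op;	/* MAP or UNMAP */
		__u32 flags;
		__u32 pad;
		__u64 va;	/* caller-chosen GPU address */
	};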

Rob
Eric Anholt March 14, 2019, 10:28 p.m. UTC | #15
Rob Herring <robh@kernel.org> writes:

> On Thu, Mar 14, 2019 at 3:45 PM Dave Airlie <airlied@gmail.com> wrote:
>>
>> On Thu, 14 Feb 2019 at 19:12, Christian König via dri-devel
>> <dri-devel@lists.freedesktop.org> wrote:
>> >
>> > Am 14.02.19 um 03:52 schrieb Alex Deucher via dri-devel:
>> > > [SNIP]
>> > >>>>> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
>> > >>>>> +{
>> > >>>>> +       struct drm_lima_gem_va *args = data;
>> > >>>>> +
>> > >>>>> +       switch (args->op) {
>> > >>>>> +       case LIMA_VA_OP_MAP:
>> > >>>>> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
>> > >>>>> +       case LIMA_VA_OP_UNMAP:
>> > >>>>> +               return lima_gem_va_unmap(file, args->handle, args->va);
>> > >>>> These are mapping to GPU VA. Why not do that on GEM object creation or
>> > >>>> import or when the objects are submitted with cmd queue as other
>> > >>>> drivers do?
>> > >>>>
>> > >>>> To put it another way, These ioctls look different than what other
>> > >>>> drivers do. Why do you need to do things differently? My understanding
>> > >>>> is best practice is to map and return the GPU offset when the GEM
>> > >>>> object is created. This is what v3d does. I think Intel is moving to
>> > >>>> that. And panfrost will do that.
>> > >>> I think it would be a good idea to look at the amdgpu driver.  This
>> > >>> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
>> > >>> userspace to manage per process (per fd really) virtual addresses.
>> > >> Why do you want userspace to manage assigning VAs versus the kernel to
>> > >> do so? Exposing that detail to userspace means the driver must support
>> > >> a per process address space. Letting the kernel assign addresses means
>> > >> it can either be a single address space or be a per process address
>> > >> space. It seems to me more flexible to allow the kernel driver to
>> > >> evolve without that ABI.
>> > > Having it in userspace provides a lot more flexibility and makes it
>> > > easier to support things like unified address space between CPU and
>> > > GPU. I guess it depends on the hw as to what is the right choice.
>> >
>> > To summarize, we actually tried this approach with radeon and it
>> > turned out to be a really bad mistake.
>> >
>> > To implement features like partially resident textures and shared
>> > virtual address space you absolutely need userspace to be in charge of
>> > allocating virtual addresses.
>> >
>>
>> I think for lima not having this is fine, but for panfrost it really
>> should have it.
>>
>> If you can implement vulkan you probably want this, nouveau hasn't a
>> vulkan driver because of exactly this problem in their uapi, so maybe
>> adjust panfrost to do user-space managed vma.
>
> Wouldn't this just require an allocation flag to not map the BO up
> front and then new ioctl's like above to map and unmap at specified
> VAs? Seems like we could add that when we get there.

Sounds pretty reasonable to me.
Christian König March 15, 2019, 8:06 a.m. UTC | #16
Am 14.03.19 um 23:28 schrieb Eric Anholt:
> Rob Herring <robh@kernel.org> writes:
>
>> On Thu, Mar 14, 2019 at 3:45 PM Dave Airlie <airlied@gmail.com> wrote:
>>> On Thu, 14 Feb 2019 at 19:12, Christian König via dri-devel
>>> <dri-devel@lists.freedesktop.org> wrote:
>>>> Am 14.02.19 um 03:52 schrieb Alex Deucher via dri-devel:
>>>>> [SNIP]
>>>>>>>>> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
>>>>>>>>> +{
>>>>>>>>> +       struct drm_lima_gem_va *args = data;
>>>>>>>>> +
>>>>>>>>> +       switch (args->op) {
>>>>>>>>> +       case LIMA_VA_OP_MAP:
>>>>>>>>> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
>>>>>>>>> +       case LIMA_VA_OP_UNMAP:
>>>>>>>>> +               return lima_gem_va_unmap(file, args->handle, args->va);
>>>>>>>> These are mapping to GPU VA. Why not do that on GEM object creation or
>>>>>>>> import or when the objects are submitted with cmd queue as other
>>>>>>>> drivers do?
>>>>>>>>
>>>>>>>> To put it another way, These ioctls look different than what other
>>>>>>>> drivers do. Why do you need to do things differently? My understanding
>>>>>>>> is best practice is to map and return the GPU offset when the GEM
>>>>>>>> object is created. This is what v3d does. I think Intel is moving to
>>>>>>>> that. And panfrost will do that.
>>>>>>> I think it would be a good idea to look at the amdgpu driver.  This
>>>>>>> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
>>>>>>> userspace to manage per process (per fd really) virtual addresses.
>>>>>> Why do you want userspace to manage assigning VAs versus the kernel to
>>>>>> do so? Exposing that detail to userspace means the driver must support
>>>>>> a per process address space. Letting the kernel assign addresses means
>>>>>> it can either be a single address space or be a per process address
>>>>>> space. It seems to me more flexible to allow the kernel driver to
>>>>>> evolve without that ABI.
>>>>> Having it in userspace provides a lot more flexibility and makes it
>>>>> easier to support things like unified address space between CPU and
>>>>> GPU. I guess it depends on the hw as to what is the right choice.
>>>> To summarize, we actually tried this approach with radeon and it
>>>> turned out to be a really bad mistake.
>>>>
>>>> To implement features like partially resident textures and shared
>>>> virtual address space you absolutely need userspace to be in charge of
>>>> allocating virtual addresses.
>>>>
>>> I think for lima not having this is fine, but for panfrost it really
>>> should have it.
>>>
>>> If you can implement vulkan you probably want this, nouveau hasn't a
>>> vulkan driver because of exactly this problem in their uapi, so maybe
>>> adjust panfrost to do user-space managed vma.
>> Wouldn't this just require an allocation flag to not map the BO up
>> front and then new ioctl's like above to map and unmap at specified
>> VAs? Seems like we could add that when we get there.
> Sounds pretty reasonable to me.

I can only advise to NOT do this.

An address space manager in userspace is rather easily doable, but
fixing up the UAPI without breaking existing applications isn't.

This is also one of the reasons why we don't support Vulkan with radeon
on the SI hardware generation.

So you've got at least two drivers which have gone down this route and
regret it now, and I can perfectly understand if Dave says that any new
DRM driver should not make the same mistake again.

Regards,
Christian.
Eric Anholt March 15, 2019, 4:05 p.m. UTC | #17
"Koenig, Christian" <Christian.Koenig@amd.com> writes:

> Am 14.03.19 um 23:28 schrieb Eric Anholt:
>> Rob Herring <robh@kernel.org> writes:
>>
>>> On Thu, Mar 14, 2019 at 3:45 PM Dave Airlie <airlied@gmail.com> wrote:
>>>> On Thu, 14 Feb 2019 at 19:12, Christian König via dri-devel
>>>> <dri-devel@lists.freedesktop.org> wrote:
>>>>> Am 14.02.19 um 03:52 schrieb Alex Deucher via dri-devel:
>>>>>> [SNIP]
>>>>>>>>>> +static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
>>>>>>>>>> +{
>>>>>>>>>> +       struct drm_lima_gem_va *args = data;
>>>>>>>>>> +
>>>>>>>>>> +       switch (args->op) {
>>>>>>>>>> +       case LIMA_VA_OP_MAP:
>>>>>>>>>> +               return lima_gem_va_map(file, args->handle, args->flags, args->va);
>>>>>>>>>> +       case LIMA_VA_OP_UNMAP:
>>>>>>>>>> +               return lima_gem_va_unmap(file, args->handle, args->va);
>>>>>>>>> These are mapping to GPU VA. Why not do that on GEM object creation or
>>>>>>>>> import or when the objects are submitted with cmd queue as other
>>>>>>>>> drivers do?
>>>>>>>>>
>>>>>>>>> To put it another way, These ioctls look different than what other
>>>>>>>>> drivers do. Why do you need to do things differently? My understanding
>>>>>>>>> is best practice is to map and return the GPU offset when the GEM
>>>>>>>>> object is created. This is what v3d does. I think Intel is moving to
>>>>>>>>> that. And panfrost will do that.
>>>>>>>> I think it would be a good idea to look at the amdgpu driver.  This
>>>>>>>> driver is heavily modeled after it.  Basically the GEM VA ioctl allows
>>>>>>>> userspace to manage per process (per fd really) virtual addresses.
>>>>>>> Why do you want userspace to manage assigning VAs versus the kernel to
>>>>>>> do so? Exposing that detail to userspace means the driver must support
>>>>>>> a per process address space. Letting the kernel assign addresses means
>>>>>>> it can either be a single address space or be a per process address
>>>>>>> space. It seems to me more flexible to allow the kernel driver to
>>>>>>> evolve without that ABI.
>>>>>> Having it in userspace provides a lot more flexibility and makes it
>>>>>> easier to support things like unified address space between CPU and
>>>>>> GPU. I guess it depends on the hw as to what is the right choice.
>>>>> To summarize, we actually tried this approach with radeon and it
>>>>> turned out to be a really bad mistake.
>>>>>
>>>>> To implement features like partially resident textures and shared
>>>>> virtual address space you absolutely need userspace to be in charge of
>>>>> allocating virtual addresses.
>>>>>
>>>> I think for lima not having this is fine, but for panfrost it really
>>>> should have it.
>>>>
>>>> If you can implement vulkan you probably want this, nouveau hasn't a
>>>> vulkan driver because of exactly this problem in their uapi, so maybe
>>>> adjust panfrost to do user-space managed vma.
>>> Wouldn't this just require an allocation flag to not map the BO up
>>> front and then new ioctl's like above to map and unmap at specified
>>> VAs? Seems like we could add that when we get there.
>> Sounds pretty reasonable to me.
>
> I can only advise to NOT do this.
>
> An address space manager in userspace is rather easily doable, but
> fixing up the UAPI without breaking existing applications isn't.

Can you expand on what goes wrong with Rob's idea?  The only thing I can
come up with is maybe dmabuf imports don't have a flag for
don't-automatically-map?
Christian König March 15, 2019, 4:19 p.m. UTC | #18
Am 15.03.19 um 17:05 schrieb Eric Anholt:
> [SNIP]
>>>>> I think for lima not having this is fine, but for panfrost it really
>>>>> should have it.
>>>>>
>>>>> If you can implement vulkan you probably want this, nouveau hasn't a
>>>>> vulkan driver because of exactly this problem in their uapi, so maybe
>>>>> adjust panfrost to do user-space managed vma.
>>>> Wouldn't this just require an allocation flag to not map the BO up
>>>> front and then new ioctl's like above to map and unmap at specified
>>>> VAs? Seems like we could add that when we get there.
>>> Sounds pretty reasonable to me.
>> I can only advise to NOT do this.
>>
>> An address space manager in userspace is rather easily doable, but
>> fixing up the UAPI without breaking existing applications isn't.
> Can you expand on what goes wrong with Rob's idea?  The only thing I can
> come up with is maybe dmabuf imports don't have a flag for
> don't-automatically-map?

Suppressing the automatic mapping is not the problem. The problem is
that you need exactly one instance to have authority over the address
space management.

With Vulkan and features like PRT this must be done by userspace. So
the kernel can't just go ahead and allocate some address space and map
your buffers.

You can of course hope that all clients respect this and you don't have
combinations of multiple clients in the same process, but from
experience I can say that this won't work at all.

Regards,
Christian.
diff mbox series

Patch

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4385f00e1d05..dfefcb393858 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -333,6 +333,8 @@  source "drivers/gpu/drm/tve200/Kconfig"
 
 source "drivers/gpu/drm/xen/Kconfig"
 
+source "drivers/gpu/drm/lima/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ce8d1d384319..8d024b729902 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -109,3 +109,4 @@  obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
 obj-$(CONFIG_DRM_XEN) += xen/
+obj-$(CONFIG_DRM_LIMA)  += lima/
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
new file mode 100644
index 000000000000..89d63cca8a75
--- /dev/null
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -0,0 +1,10 @@ 
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2018 Qiang Yu <yuq825@gmail.com>
+
+config DRM_LIMA
+       tristate "LIMA (DRM support for ARM Mali 400/450 GPU)"
+       depends on DRM
+       select DRM_SCHED
+       select DRM_TTM
+       help
+         DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
new file mode 100644
index 000000000000..8bb97410d961
--- /dev/null
+++ b/drivers/gpu/drm/lima/Makefile
@@ -0,0 +1,22 @@ 
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2018 Qiang Yu <yuq825@gmail.com>
+
+lima-y := \
+	lima_drv.o \
+	lima_device.o \
+	lima_pmu.o \
+	lima_l2_cache.o \
+	lima_mmu.o \
+	lima_gp.o \
+	lima_pp.o \
+	lima_gem.o \
+	lima_vm.o \
+	lima_sched.o \
+	lima_ctx.o \
+	lima_gem_prime.o \
+	lima_dlbu.o \
+	lima_bcast.o \
+	lima_ttm.o \
+	lima_object.o
+
+obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
new file mode 100644
index 000000000000..63754f6465ea
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -0,0 +1,46 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_bcast.h"
+#include "lima_regs.h"
+
+#define bcast_write(reg, data) writel(data, ip->iomem + LIMA_BCAST_##reg)
+#define bcast_read(reg) readl(ip->iomem + LIMA_BCAST_##reg)
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp)
+{
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+	struct lima_ip *ip = dev->ip + lima_ip_bcast;
+	int i, mask = bcast_read(BROADCAST_MASK) & 0xffff0000;
+
+	for (i = 0; i < num_pp; i++) {
+		struct lima_ip *pp = pipe->processor[i];
+		mask |= 1 << (pp->id - lima_ip_pp0);
+	}
+
+	bcast_write(BROADCAST_MASK, mask);
+}
+
+int lima_bcast_init(struct lima_ip *ip)
+{
+	int i, mask = 0;
+
+	for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
+		if (ip->dev->ip[i].present)
+			mask |= 1 << (i - lima_ip_pp0);
+	}
+
+	bcast_write(BROADCAST_MASK, mask << 16);
+	bcast_write(INTERRUPT_MASK, mask);
+	return 0;
+}
+
+void lima_bcast_fini(struct lima_ip *ip)
+{
+
+}
+
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
new file mode 100644
index 000000000000..345e3e809860
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -0,0 +1,14 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_BCAST_H__
+#define __LIMA_BCAST_H__
+
+struct lima_ip;
+
+int lima_bcast_init(struct lima_ip *ip);
+void lima_bcast_fini(struct lima_ip *ip);
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
new file mode 100644
index 000000000000..724ac4051f7a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -0,0 +1,124 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+
+#include "lima_device.h"
+#include "lima_ctx.h"
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
+{
+	struct lima_ctx *ctx;
+	int i, err;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+	ctx->dev = dev;
+	kref_init(&ctx->refcnt);
+
+	for (i = 0; i < lima_pipe_num; i++) {
+		err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
+		if (err)
+			goto err_out0;
+	}
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&mgr->lock);
+	err = idr_alloc(&mgr->handles, ctx, 1, 0, GFP_ATOMIC);
+	spin_unlock(&mgr->lock);
+	idr_preload_end();
+	if (err < 0)
+		goto err_out0;
+
+	*id = err;
+	return 0;
+
+err_out0:
+	for (i--; i >= 0; i--)
+		lima_sched_context_fini(dev->pipe + i, ctx->context + i);
+	kfree(ctx);
+	return err;
+}
+
+static void lima_ctx_do_release(struct kref *ref)
+{
+	struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt);
+	int i;
+
+	for (i = 0; i < lima_pipe_num; i++)
+		lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i);
+	kfree(ctx);
+}
+
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id)
+{
+	struct lima_ctx *ctx;
+
+	spin_lock(&mgr->lock);
+	ctx = idr_remove(&mgr->handles, id);
+	spin_unlock(&mgr->lock);
+
+	if (ctx) {
+		kref_put(&ctx->refcnt, lima_ctx_do_release);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id)
+{
+	struct lima_ctx *ctx;
+
+	spin_lock(&mgr->lock);
+	ctx = idr_find(&mgr->handles, id);
+	if (ctx)
+		kref_get(&ctx->refcnt);
+	spin_unlock(&mgr->lock);
+	return ctx;
+}
+
+void lima_ctx_put(struct lima_ctx *ctx)
+{
+	kref_put(&ctx->refcnt, lima_ctx_do_release);
+}
+
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr)
+{
+	spin_lock_init(&mgr->lock);
+	idr_init(&mgr->handles);
+}
+
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr)
+{
+	struct lima_ctx *ctx;
+	struct idr *idp;
+	uint32_t id;
+
+	idp = &mgr->handles;
+
+	idr_for_each_entry(idp, ctx, id)
+		kref_put(&ctx->refcnt, lima_ctx_do_release);
+
+	idr_destroy(&mgr->handles);
+}
+
+struct dma_fence *lima_ctx_get_native_fence(struct lima_ctx_mgr *mgr,
+					    u32 ctx, u32 pipe, u32 seq)
+{
+	struct lima_ctx *c;
+	struct dma_fence *ret;
+
+	if (pipe >= lima_pipe_num)
+		return ERR_PTR(-EINVAL);
+
+	c = lima_ctx_get(mgr, ctx);
+	if (!c)
+		return ERR_PTR(-ENOENT);
+
+	ret = lima_sched_context_get_fence(c->context + pipe, seq);
+
+	lima_ctx_put(c);
+	return ret;
+}
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
new file mode 100644
index 000000000000..80e55e16619f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -0,0 +1,33 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_CTX_H__
+#define __LIMA_CTX_H__
+
+#include <linux/idr.h>
+
+#include "lima_device.h"
+
+struct lima_ctx {
+	struct kref refcnt;
+	struct lima_device *dev;
+	struct lima_sched_context context[lima_pipe_num];
+	atomic_t guilty;
+};
+
+struct lima_ctx_mgr {
+	spinlock_t lock;
+	struct idr handles;
+};
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id);
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id);
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id);
+void lima_ctx_put(struct lima_ctx *ctx);
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr);
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr);
+
+struct dma_fence *lima_ctx_get_native_fence(struct lima_ctx_mgr *mgr,
+					    u32 ctx, u32 pipe, u32 seq);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
new file mode 100644
index 000000000000..b88c84d796fc
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -0,0 +1,384 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_pp.h"
+#include "lima_mmu.h"
+#include "lima_pmu.h"
+#include "lima_l2_cache.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+
+struct lima_ip_desc {
+	char *name;
+	char *irq_name;
+	bool must_have[lima_gpu_num];
+	int offset[lima_gpu_num];
+
+	int (*init)(struct lima_ip *);
+	void (*fini)(struct lima_ip *);
+};
+
+#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
+	[lima_ip_##ipname] = { \
+		.name = #ipname, \
+		.irq_name = irq, \
+		.must_have = { \
+			[lima_gpu_mali400] = mst0, \
+			[lima_gpu_mali450] = mst1, \
+		}, \
+		.offset = { \
+			[lima_gpu_mali400] = off0, \
+			[lima_gpu_mali450] = off1, \
+		}, \
+		.init = lima_##func##_init, \
+		.fini = lima_##func##_fini, \
+	}
+
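+/* an offset of -1 marks an IP block that does not exist on that GPU model */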
+static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
+	LIMA_IP_DESC(pmu,         false, false, 0x02000, 0x02000, pmu,      "pmu"),
+	LIMA_IP_DESC(l2_cache0,   true,  true,  0x01000, 0x10000, l2_cache, NULL),
+	LIMA_IP_DESC(l2_cache1,   false, true,  -1,      0x01000, l2_cache, NULL),
+	LIMA_IP_DESC(l2_cache2,   false, false, -1,      0x11000, l2_cache, NULL),
+	LIMA_IP_DESC(gp,          true,  true,  0x00000, 0x00000, gp,       "gp"),
+	LIMA_IP_DESC(pp0,         true,  true,  0x08000, 0x08000, pp,       "pp0"),
+	LIMA_IP_DESC(pp1,         false, false, 0x0A000, 0x0A000, pp,       "pp1"),
+	LIMA_IP_DESC(pp2,         false, false, 0x0C000, 0x0C000, pp,       "pp2"),
+	LIMA_IP_DESC(pp3,         false, false, 0x0E000, 0x0E000, pp,       "pp3"),
+	LIMA_IP_DESC(pp4,         false, false, -1,      0x28000, pp,       "pp4"),
+	LIMA_IP_DESC(pp5,         false, false, -1,      0x2A000, pp,       "pp5"),
+	LIMA_IP_DESC(pp6,         false, false, -1,      0x2C000, pp,       "pp6"),
+	LIMA_IP_DESC(pp7,         false, false, -1,      0x2E000, pp,       "pp7"),
+	LIMA_IP_DESC(gpmmu,       true,  true,  0x03000, 0x03000, mmu,      "gpmmu"),
+	LIMA_IP_DESC(ppmmu0,      true,  true,  0x04000, 0x04000, mmu,      "ppmmu0"),
+	LIMA_IP_DESC(ppmmu1,      false, false, 0x05000, 0x05000, mmu,      "ppmmu1"),
+	LIMA_IP_DESC(ppmmu2,      false, false, 0x06000, 0x06000, mmu,      "ppmmu2"),
+	LIMA_IP_DESC(ppmmu3,      false, false, 0x07000, 0x07000, mmu,      "ppmmu3"),
+	LIMA_IP_DESC(ppmmu4,      false, false, -1,      0x1C000, mmu,      "ppmmu4"),
+	LIMA_IP_DESC(ppmmu5,      false, false, -1,      0x1D000, mmu,      "ppmmu5"),
+	LIMA_IP_DESC(ppmmu6,      false, false, -1,      0x1E000, mmu,      "ppmmu6"),
+	LIMA_IP_DESC(ppmmu7,      false, false, -1,      0x1F000, mmu,      "ppmmu7"),
+	LIMA_IP_DESC(dlbu,        false, true,  -1,      0x14000, dlbu,     NULL),
+	LIMA_IP_DESC(bcast,       false, true,  -1,      0x13000, bcast,    NULL),
+	LIMA_IP_DESC(pp_bcast,    false, true,  -1,      0x16000, pp_bcast, "pp"),
+	LIMA_IP_DESC(ppmmu_bcast, false, true,  -1,      0x15000, mmu,      NULL),
+};
+
+const char *lima_ip_name(struct lima_ip *ip)
+{
+	return lima_ip_desc[ip->id].name;
+}
+
+static int lima_clk_init(struct lima_device *dev)
+{
+	int err;
+	unsigned long bus_rate, gpu_rate;
+
+	dev->clk_bus = devm_clk_get(dev->dev, "bus");
+	if (IS_ERR(dev->clk_bus)) {
+		dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
+		return PTR_ERR(dev->clk_bus);
+	}
+
+	dev->clk_gpu = devm_clk_get(dev->dev, "core");
+	if (IS_ERR(dev->clk_gpu)) {
+		dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
+		return PTR_ERR(dev->clk_gpu);
+	}
+
+	bus_rate = clk_get_rate(dev->clk_bus);
+	dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
+
+	gpu_rate = clk_get_rate(dev->clk_gpu);
+	dev_info(dev->dev, "mod rate = %lu", gpu_rate);
+
+	err = clk_prepare_enable(dev->clk_bus);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(dev->clk_gpu);
+	if (err)
+		goto error_out0;
+
+	dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+	if (IS_ERR(dev->reset)) {
+		err = PTR_ERR(dev->reset);
+		goto error_out1;
+	} else if (dev->reset) {
+		err = reset_control_deassert(dev->reset);
+		if (err)
+			goto error_out1;
+	}
+
+	return 0;
+
+error_out1:
+	clk_disable_unprepare(dev->clk_gpu);
+error_out0:
+	clk_disable_unprepare(dev->clk_bus);
+	return err;
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+	if (dev->reset)
+		reset_control_assert(dev->reset);
+	clk_disable_unprepare(dev->clk_gpu);
+	clk_disable_unprepare(dev->clk_bus);
+}
+
+static int lima_regulator_init(struct lima_device *dev)
+{
+	int ret;
+
+	dev->regulator = devm_regulator_get_optional(dev->dev, "mali");
+	if (IS_ERR(dev->regulator)) {
+		ret = PTR_ERR(dev->regulator);
+		dev->regulator = NULL;
+		if (ret == -ENODEV)
+			return 0;
+		dev_err(dev->dev, "failed to get regulator: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_enable(dev->regulator);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void lima_regulator_fini(struct lima_device *dev)
+{
+	if (dev->regulator)
+		regulator_disable(dev->regulator);
+}
+
+static int lima_init_ip(struct lima_device *dev, int index)
+{
+	struct lima_ip_desc *desc = lima_ip_desc + index;
+	struct lima_ip *ip = dev->ip + index;
+	int offset = desc->offset[dev->id];
+	bool must = desc->must_have[dev->id];
+	int err;
+
+	if (offset < 0)
+		return 0;
+
+	ip->dev = dev;
+	ip->id = index;
+	ip->iomem = dev->iomem + offset;
+	if (desc->irq_name) {
+		err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+		if (err < 0)
+			goto out;
+		ip->irq = err;
+	}
+
+	err = desc->init(ip);
+	if (!err) {
+		ip->present = true;
+		return 0;
+	}
+
+out:
+	return must ? err : 0;
+}
+
+static void lima_fini_ip(struct lima_device *ldev, int index)
+{
+	struct lima_ip_desc *desc = lima_ip_desc + index;
+	struct lima_ip *ip = ldev->ip + index;
+
+	if (ip->present)
+		desc->fini(ip);
+}
+
+static int lima_init_gp_pipe(struct lima_device *dev)
+{
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+	int err;
+
+	if ((err = lima_sched_pipe_init(pipe, "gp")))
+		return err;
+
+	pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0;
+	pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
+	pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp;
+
+	err = lima_gp_pipe_init(dev);
+	if (err) {
+		lima_sched_pipe_fini(pipe);
+		return err;
+	}
+
+	return 0;
+}
+
+static void lima_fini_gp_pipe(struct lima_device *dev)
+{
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
+	lima_gp_pipe_fini(dev);
+	lima_sched_pipe_fini(pipe);
+}
+
+static int lima_init_pp_pipe(struct lima_device *dev)
+{
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+	int err, i;
+
+	if ((err = lima_sched_pipe_init(pipe, "pp")))
+		return err;
+
+	for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) {
+		struct lima_ip *pp = dev->ip + lima_ip_pp0 + i;
+		struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i;
+		struct lima_ip *l2_cache;
+
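+		/*
+		 * Mali400 has a single L2 shared by GP and all PPs; on
+		 * Mali450 PP0-3 sit behind l2_cache1 and PP4-7 behind
+		 * l2_cache2, hence the (i >> 2) group index.
+		 */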
+		if (dev->id == lima_gpu_mali400)
+			l2_cache = dev->ip + lima_ip_l2_cache0;
+		else
+			l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2);
+
+		if (pp->present && ppmmu->present && l2_cache->present) {
+			pipe->mmu[pipe->num_mmu++] = ppmmu;
+			pipe->processor[pipe->num_processor++] = pp;
+			if (!pipe->l2_cache[i >> 2])
+				pipe->l2_cache[pipe->num_l2_cache++] = l2_cache;
+		}
+	}
+
+	if (dev->ip[lima_ip_bcast].present) {
+		pipe->bcast_processor = dev->ip + lima_ip_pp_bcast;
+		pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast;
+	}
+
+	err = lima_pp_pipe_init(dev);
+	if (err) {
+		lima_sched_pipe_fini(pipe);
+		return err;
+	}
+
+	return 0;
+}
+
+static void lima_fini_pp_pipe(struct lima_device *dev)
+{
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+	lima_pp_pipe_fini(dev);
+	lima_sched_pipe_fini(pipe);
+}
+
+int lima_device_init(struct lima_device *ldev)
+{
+	int err, i;
+	struct resource *res;
+
+	err = dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	err = lima_clk_init(ldev);
+	if (err) {
+		dev_err(ldev->dev, "clk init fail %d\n", err);
+		return err;
+	}
+
+	err = lima_regulator_init(ldev);
+	if (err) {
+		dev_err(ldev->dev, "regulator init fail %d\n", err);
+		goto err_out0;
+	}
+
+	err = lima_ttm_init(ldev);
+	if (err)
+		goto err_out1;
+
+	ldev->empty_vm = lima_vm_create(ldev);
+	if (!ldev->empty_vm) {
+		err = -ENOMEM;
+		goto err_out2;
+	}
+
+	ldev->va_start = 0;
+	if (ldev->id == lima_gpu_mali450) {
+		ldev->va_end = LIMA_VA_RESERVE_START;
+		ldev->dlbu_cpu = dma_alloc_wc(
+			ldev->dev, LIMA_PAGE_SIZE,
+			&ldev->dlbu_dma, GFP_KERNEL);
+		if (!ldev->dlbu_cpu) {
+			err = -ENOMEM;
+			goto err_out3;
+		}
+	} else {
+		ldev->va_end = LIMA_VA_RESERVE_END;
+	}
+
+	res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
+	ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+	if (IS_ERR(ldev->iomem)) {
+		dev_err(ldev->dev, "fail to ioremap iomem\n");
+	        err = PTR_ERR(ldev->iomem);
+		goto err_out4;
+	}
+
+	for (i = 0; i < lima_ip_num; i++) {
+		err = lima_init_ip(ldev, i);
+		if (err)
+			goto err_out5;
+	}
+
+	err = lima_init_gp_pipe(ldev);
+	if (err)
+		goto err_out5;
+
+	err = lima_init_pp_pipe(ldev);
+	if (err)
+		goto err_out6;
+
+	return 0;
+
+err_out6:
+	lima_fini_gp_pipe(ldev);
+err_out5:
+	while (--i >= 0)
+		lima_fini_ip(ldev, i);
+err_out4:
+	if (ldev->dlbu_cpu)
+		dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+			    ldev->dlbu_cpu, ldev->dlbu_dma);
+err_out3:
+	lima_vm_put(ldev->empty_vm);
+err_out2:
+	lima_ttm_fini(ldev);
+err_out1:
+	lima_regulator_fini(ldev);
+err_out0:
+	lima_clk_fini(ldev);
+	return err;
+}
+
+void lima_device_fini(struct lima_device *ldev)
+{
+	int i;
+
+	lima_fini_pp_pipe(ldev);
+	lima_fini_gp_pipe(ldev);
+
+	for (i = lima_ip_num - 1; i >= 0; i--)
+		lima_fini_ip(ldev, i);
+
+	if (ldev->dlbu_cpu)
+		dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+			    ldev->dlbu_cpu, ldev->dlbu_dma);
+
+	lima_vm_put(ldev->empty_vm);
+
+	lima_ttm_fini(ldev);
+
+	lima_regulator_fini(ldev);
+
+	lima_clk_fini(ldev);
+}
diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h
new file mode 100644
index 000000000000..788226490524
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.h
@@ -0,0 +1,116 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DEVICE_H__
+#define __LIMA_DEVICE_H__
+
+#include <drm/drm_device.h>
+
+#include "lima_sched.h"
+#include "lima_ttm.h"
+
+enum lima_gpu_id {
+	lima_gpu_mali400 = 0,
+	lima_gpu_mali450,
+	lima_gpu_num,
+};
+
+enum lima_ip_id {
+	lima_ip_pmu,
+	lima_ip_gpmmu,
+	lima_ip_ppmmu0,
+	lima_ip_ppmmu1,
+	lima_ip_ppmmu2,
+	lima_ip_ppmmu3,
+	lima_ip_ppmmu4,
+	lima_ip_ppmmu5,
+	lima_ip_ppmmu6,
+	lima_ip_ppmmu7,
+	lima_ip_gp,
+	lima_ip_pp0,
+	lima_ip_pp1,
+	lima_ip_pp2,
+	lima_ip_pp3,
+	lima_ip_pp4,
+	lima_ip_pp5,
+	lima_ip_pp6,
+	lima_ip_pp7,
+	lima_ip_l2_cache0,
+	lima_ip_l2_cache1,
+	lima_ip_l2_cache2,
+	lima_ip_dlbu,
+	lima_ip_bcast,
+	lima_ip_pp_bcast,
+	lima_ip_ppmmu_bcast,
+	lima_ip_num,
+};
+
+struct lima_device;
+
+struct lima_ip {
+	struct lima_device *dev;
+	enum lima_ip_id id;
+	bool present;
+
+	void __iomem *iomem;
+	int irq;
+
+	union {
+		/* gp/pp */
+		bool async_reset;
+		/* l2 cache */
+		spinlock_t lock;
+	} data;
+};
+
+enum lima_pipe_id {
+	lima_pipe_gp,
+	lima_pipe_pp,
+	lima_pipe_num,
+};
+
+struct lima_device {
+	struct device *dev;
+	struct drm_device *ddev;
+	struct platform_device *pdev;
+
+	enum lima_gpu_id id;
+	int num_pp;
+
+	void __iomem *iomem;
+	struct clk *clk_bus;
+	struct clk *clk_gpu;
+	struct reset_control *reset;
+	struct regulator *regulator;
+
+	struct lima_ip ip[lima_ip_num];
+	struct lima_sched_pipe pipe[lima_pipe_num];
+
+	struct lima_mman mman;
+
+	struct lima_vm *empty_vm;
+	uint64_t va_start;
+	uint64_t va_end;
+
+	u32 *dlbu_cpu;
+	dma_addr_t dlbu_dma;
+};
+
+static inline struct lima_device *
+to_lima_dev(struct drm_device *dev)
+{
+	return dev->dev_private;
+}
+
+static inline struct lima_device *
+ttm_to_lima_dev(struct ttm_bo_device *dev)
+{
+	return container_of(dev, struct lima_device, mman.bdev);
+}
+
+int lima_device_init(struct lima_device *ldev);
+void lima_device_fini(struct lima_device *ldev);
+
+const char *lima_ip_name(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
new file mode 100644
index 000000000000..6697d4ddd887
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.c
@@ -0,0 +1,56 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_dlbu.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define dlbu_write(reg, data) writel(data, ip->iomem + LIMA_DLBU_##reg)
+#define dlbu_read(reg) readl(ip->iomem + LIMA_DLBU_##reg)
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp)
+{
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+	struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+	int i, mask = 0;
+
+	for (i = 0; i < num_pp; i++) {
+		struct lima_ip *pp = pipe->processor[i];
+
+		mask |= 1 << (pp->id - lima_ip_pp0);
+	}
+
+	dlbu_write(PP_ENABLE_MASK, mask);
+}
+
+void lima_dlbu_disable(struct lima_device *dev)
+{
+	struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+
+	dlbu_write(PP_ENABLE_MASK, 0);
+}
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
+{
+	dlbu_write(TLLIST_VBASEADDR, reg[0]);
+	dlbu_write(FB_DIM, reg[1]);
+	dlbu_write(TLLIST_CONF, reg[2]);
+	dlbu_write(START_TILE_POS, reg[3]);
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+
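+	/* the low bit or'ed into the address presumably marks the entry valid */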
+	dlbu_write(MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
+	dlbu_write(MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
+
+	return 0;
+}
+
+void lima_dlbu_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
new file mode 100644
index 000000000000..60cba387cf30
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.h
@@ -0,0 +1,18 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DLBU_H__
+#define __LIMA_DLBU_H__
+
+struct lima_ip;
+struct lima_device;
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp);
+void lima_dlbu_disable(struct lima_device *dev);
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
+
+int lima_dlbu_init(struct lima_ip *ip);
+void lima_dlbu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
new file mode 100644
index 000000000000..132071b9be9b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -0,0 +1,459 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/log2.h>
+#include <drm/drm_prime.h>
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+
+int lima_sched_timeout_ms;
+int lima_sched_max_tasks = 32;
+int lima_max_mem = -1;
+
+MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
+module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+
+MODULE_PARM_DESC(sched_max_tasks, "max queued task num in a context (default 32)");
+module_param_named(sched_max_tasks, lima_sched_max_tasks, int, 0444);
+
+MODULE_PARM_DESC(max_mem, "Max memory size in MB can be used (<0 = auto)");
+module_param_named(max_mem, lima_max_mem, int, 0444);
+
+static int lima_ioctl_info(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_info *info = data;
+	struct lima_device *ldev = to_lima_dev(dev);
+
+	switch (ldev->id) {
+	case lima_gpu_mali400:
+		info->gpu_id = LIMA_INFO_GPU_MALI400;
+		break;
+	case lima_gpu_mali450:
+		info->gpu_id = LIMA_INFO_GPU_MALI450;
+		break;
+	default:
+		return -ENODEV;
+	}
+	info->num_pp = ldev->pipe[lima_pipe_pp].num_processor;
+	info->va_start = ldev->va_start;
+	info->va_end = ldev->va_end;
+	return 0;
+}
+
+static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_gem_create *args = data;
+
+	if (args->flags)
+		return -EINVAL;
+
+	if (args->size == 0)
+		return -EINVAL;
+
+	return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
+}
+
+static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_gem_info *args = data;
+
+	return lima_gem_mmap_offset(file, args->handle, &args->offset);
+}
+
+static int lima_ioctl_gem_va(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_gem_va *args = data;
+
+	switch (args->op) {
+	case LIMA_VA_OP_MAP:
+		return lima_gem_va_map(file, args->handle, args->flags, args->va);
+	case LIMA_VA_OP_UNMAP:
+		return lima_gem_va_unmap(file, args->handle, args->va);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_gem_submit_in *args = data;
+	struct lima_device *ldev = to_lima_dev(dev);
+	struct lima_drm_priv *priv = file->driver_priv;
+	struct drm_lima_gem_submit_bo *bos;
+	struct ttm_validate_buffer *vbs;
+	union drm_lima_gem_submit_dep *deps = NULL;
+	struct lima_sched_pipe *pipe;
+	struct lima_sched_task *task;
+	struct lima_ctx *ctx;
+	struct lima_submit submit = {0};
+	int err = 0, size;
+
+	if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
+		return -EINVAL;
+
+	if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE |
+			    LIMA_SUBMIT_FLAG_SYNC_FD_OUT))
+		return -EINVAL;
+
+	pipe = ldev->pipe + args->pipe;
+	if (args->frame_size != pipe->frame_size)
+		return -EINVAL;
+
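+	/*
+	 * One allocation backs three consecutive arrays: the user bos,
+	 * the ttm validate buffers and the deps, carved out below.
+	 */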
+	size = args->nr_bos * (sizeof(*submit.bos) + sizeof(*submit.vbs)) +
+		args->nr_deps * sizeof(*submit.deps);
+	bos = kzalloc(size, GFP_KERNEL);
+	if (!bos)
+		return -ENOMEM;
+
+	size = args->nr_bos * sizeof(*submit.bos);
+	if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
+		err = -EFAULT;
+		goto out0;
+	}
+
+	vbs = (void *)bos + size;
+
+	if (args->nr_deps) {
+		deps = (void *)vbs + args->nr_bos * sizeof(*submit.vbs);
+		size = args->nr_deps * sizeof(*submit.deps);
+		if (copy_from_user(deps, u64_to_user_ptr(args->deps), size)) {
+			err = -EFAULT;
+			goto out0;
+		}
+	}
+
+	task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
+	if (!task) {
+		err = -ENOMEM;
+		goto out0;
+	}
+
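+	/*
+	 * The pipe task slab is sized sizeof(task) + frame_size, so the
+	 * userspace frame lands directly behind the task struct.
+	 */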
+	task->frame = task + 1;
+	if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
+		err = -EFAULT;
+		goto out1;
+	}
+
+	err = pipe->task_validate(pipe, task);
+	if (err)
+		goto out1;
+
+	ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
+	if (!ctx) {
+		err = -ENOENT;
+		goto out1;
+	}
+
+	submit.pipe = args->pipe;
+	submit.bos = bos;
+	submit.vbs = vbs;
+	submit.nr_bos = args->nr_bos;
+	submit.task = task;
+	submit.ctx = ctx;
+	submit.deps = deps;
+	submit.nr_deps = args->nr_deps;
+	submit.flags = args->flags;
+
+	err = lima_gem_submit(file, &submit);
+	if (!err) {
+		struct drm_lima_gem_submit_out *out = data;
+
+		out->fence = submit.fence;
+		out->done = submit.done;
+		out->sync_fd = submit.sync_fd;
+	}
+
+	lima_ctx_put(ctx);
+out1:
+	if (err)
+		kmem_cache_free(pipe->task_slab, task);
+out0:
+	kfree(bos);
+	return err;
+}
+
+static int lima_wait_fence(struct dma_fence *fence, u64 timeout_ns)
+{
+	signed long ret;
+
+	if (!timeout_ns) {
+		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
+	} else {
+		unsigned long timeout = lima_timeout_to_jiffies(timeout_ns);
+
+		/* the result must be checked as a long: on 64bit archs an
+		 * int would overflow for large timeouts and read as <0
+		 */
+		ret = dma_fence_wait_timeout(fence, true, timeout);
+		if (ret == 0)
+			ret = timeout ? -ETIMEDOUT : -EBUSY;
+		else if (ret > 0)
+			ret = 0;
+	}
+
+	return ret;
+}
+
+static int lima_ioctl_wait_fence(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_wait_fence *args = data;
+	struct lima_drm_priv *priv = file->driver_priv;
+	struct dma_fence *fence;
+	int err = 0;
+
+	fence = lima_ctx_get_native_fence(&priv->ctx_mgr, args->ctx,
+					  args->pipe, args->seq);
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
+	if (fence) {
+		err = lima_wait_fence(fence, args->timeout_ns);
+		args->error = fence->error;
+		dma_fence_put(fence);
+	} else {
+		args->error = 0;
+	}
+
+	return err;
+}
+
+static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_gem_wait *args = data;
+
+	if (!(args->op & (LIMA_GEM_WAIT_READ | LIMA_GEM_WAIT_WRITE)))
+		return -EINVAL;
+
+	return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
+}
+
+static int lima_ioctl_ctx(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_ctx *args = data;
+	struct lima_drm_priv *priv = file->driver_priv;
+	struct lima_device *ldev = to_lima_dev(dev);
+
+	if (args->op == LIMA_CTX_OP_CREATE)
+		return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
+	else if (args->op == LIMA_CTX_OP_FREE)
+		return lima_ctx_free(&priv->ctx_mgr, args->id);
+
+	return -EINVAL;
+}
+
+static int lima_ioctl_gem_mod(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct drm_lima_gem_mod *args = data;
+
+	if (args->op == LIMA_GEM_MOD_OP_GET)
+		return lima_gem_get_modifier(file, args->handle, &args->modifier);
+	else if (args->op == LIMA_GEM_MOD_OP_SET)
+		return lima_gem_set_modifier(file, args->handle, args->modifier);
+
+	return -EINVAL;
+}
+
+static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	int err;
+	struct lima_drm_priv *priv;
+	struct lima_device *ldev = to_lima_dev(dev);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->vm = lima_vm_create(ldev);
+	if (!priv->vm) {
+		err = -ENOMEM;
+		goto err_out0;
+	}
+
+	lima_ctx_mgr_init(&priv->ctx_mgr);
+
+	file->driver_priv = priv;
+	return 0;
+
+err_out0:
+	kfree(priv);
+	return err;
+}
+
+static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct lima_drm_priv *priv = file->driver_priv;
+
+	lima_ctx_mgr_fini(&priv->ctx_mgr);
+	lima_vm_put(priv->vm);
+	kfree(priv);
+}
+
+static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(LIMA_INFO, lima_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_GEM_VA, lima_ioctl_gem_va, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_WAIT_FENCE, lima_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_CTX, lima_ioctl_ctx, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(LIMA_GEM_MOD, lima_ioctl_gem_mod, DRM_AUTH|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations lima_drm_driver_fops = {
+	.owner              = THIS_MODULE,
+	.open               = drm_open,
+	.release            = drm_release,
+	.unlocked_ioctl     = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl       = drm_compat_ioctl,
+#endif
+	.mmap               = lima_gem_mmap,
+};
+
+static struct drm_driver lima_drm_driver = {
+	.driver_features    = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME,
+	.open               = lima_drm_driver_open,
+	.postclose          = lima_drm_driver_postclose,
+	.ioctls             = lima_drm_driver_ioctls,
+	.num_ioctls         = ARRAY_SIZE(lima_drm_driver_ioctls),
+	.fops               = &lima_drm_driver_fops,
+	.gem_free_object_unlocked = lima_gem_free_object,
+	.gem_open_object    = lima_gem_object_open,
+	.gem_close_object   = lima_gem_object_close,
+	.name               = "lima",
+	.desc               = "lima DRM",
+	.date               = "20170325",
+	.major              = 1,
+	.minor              = 0,
+	.patchlevel         = 0,
+
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_export   = drm_gem_prime_export,
+	.gem_prime_res_obj  = lima_gem_prime_res_obj,
+	.gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
+	.gem_prime_vmap = lima_gem_prime_vmap,
+	.gem_prime_vunmap = lima_gem_prime_vunmap,
+	.gem_prime_mmap = lima_gem_prime_mmap,
+};
+
+static int lima_pdev_probe(struct platform_device *pdev)
+{
+	struct lima_device *ldev;
+	struct drm_device *ddev;
+	int err;
+
+	ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL);
+	if (!ldev)
+		return -ENOMEM;
+
+	ldev->pdev = pdev;
+	ldev->dev = &pdev->dev;
+	ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
+
+	platform_set_drvdata(pdev, ldev);
+
+	/* Allocate and initialize the DRM device. */
+	ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
+	if (IS_ERR(ddev))
+		return PTR_ERR(ddev);
+
+	ddev->dev_private = ldev;
+	ldev->ddev = ddev;
+
+	err = lima_device_init(ldev);
+	if (err) {
+		dev_err(&pdev->dev, "Fatal error during GPU init\n");
+		goto err_out0;
+	}
+
+	/*
+	 * Register the DRM device with the core and the connectors with
+	 * sysfs.
+	 */
+	err = drm_dev_register(ddev, 0);
+	if (err < 0)
+		goto err_out1;
+
+	return 0;
+
+err_out1:
+	lima_device_fini(ldev);
+err_out0:
+	drm_dev_put(ddev);
+	return err;
+}
+
+static int lima_pdev_remove(struct platform_device *pdev)
+{
+	struct lima_device *ldev = platform_get_drvdata(pdev);
+	struct drm_device *ddev = ldev->ddev;
+
+	drm_dev_unregister(ddev);
+	lima_device_fini(ldev);
+	drm_dev_put(ddev);
+	return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
+	{ .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver lima_platform_driver = {
+	.probe      = lima_pdev_probe,
+	.remove     = lima_pdev_remove,
+	.driver     = {
+		.name   = "lima",
+		.of_match_table = dt_match,
+	},
+};
+
+static void lima_check_module_param(void)
+{
+	if (lima_sched_max_tasks < 4)
+		lima_sched_max_tasks = 4;
+	else
+		lima_sched_max_tasks = roundup_pow_of_two(lima_sched_max_tasks);
+
+	if (lima_max_mem < 32)
+		lima_max_mem = -1;
+}
+
+static int __init lima_init(void)
+{
+	int ret;
+
+	lima_check_module_param();
+	ret = lima_sched_slab_init();
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&lima_platform_driver);
+	if (ret)
+		lima_sched_slab_fini();
+
+	return ret;
+}
+module_init(lima_init);
+
+static void __exit lima_exit(void)
+{
+	platform_driver_unregister(&lima_platform_driver);
+	lima_sched_slab_fini();
+}
+module_exit(lima_exit);
+
+MODULE_AUTHOR("Lima Project Developers");
+MODULE_DESCRIPTION("Lima DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
new file mode 100644
index 000000000000..455bf44ae25e
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -0,0 +1,59 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DRV_H__
+#define __LIMA_DRV_H__
+
+#include <drm/drmP.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+#include "lima_ctx.h"
+
+extern int lima_sched_timeout_ms;
+extern int lima_sched_max_tasks;
+extern int lima_max_mem;
+
+struct lima_vm;
+struct lima_bo;
+struct lima_sched_task;
+
+struct drm_lima_gem_submit_bo;
+
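+/*
+ * mmap fake offsets start at 4GB (with 4K pages); lima_gem_mmap()
+ * rejects anything below this boundary.
+ */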
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+struct lima_drm_priv {
+	struct lima_vm *vm;
+	struct lima_ctx_mgr ctx_mgr;
+};
+
+struct lima_submit {
+	struct lima_ctx *ctx;
+	int pipe;
+	u32 flags;
+
+	struct drm_lima_gem_submit_bo *bos;
+	struct ttm_validate_buffer *vbs;
+	u32 nr_bos;
+
+	struct ttm_validate_buffer vm_pd_vb;
+	struct ww_acquire_ctx ticket;
+	struct list_head duplicates;
+	struct list_head validated;
+
+	union drm_lima_gem_submit_dep *deps;
+	u32 nr_deps;
+
+	struct lima_sched_task *task;
+
+	uint32_t fence;
+	uint32_t done;
+	int sync_fd;
+};
+
+static inline struct lima_drm_priv *
+to_lima_drm_priv(struct drm_file *file)
+{
+	return file->driver_priv;
+}
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
new file mode 100644
index 000000000000..b5e768d8dd8a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -0,0 +1,485 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <drm/drmP.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sync_file.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+			   u32 size, u32 flags, u32 *handle)
+{
+	int err;
+	struct lima_bo *bo;
+	struct lima_device *ldev = to_lima_dev(dev);
+
+	bo = lima_bo_create(ldev, size, flags, ttm_bo_type_device, NULL, NULL);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	err = drm_gem_handle_create(file, &bo->gem, handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put_unlocked(&bo->gem);
+
+	return err;
+}
+
+void lima_gem_free_object(struct drm_gem_object *obj)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+
+	if (!list_empty(&bo->va))
+		dev_err(obj->dev->dev, "lima gem free bo still has va\n");
+
+	lima_bo_unref(bo);
+}
+
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+	struct lima_drm_priv *priv = to_lima_drm_priv(file);
+	struct lima_vm *vm = priv->vm;
+	int err;
+
+	err = lima_bo_reserve(bo, true);
+	if (err)
+		return err;
+
+	err = lima_vm_bo_add(vm, bo);
+
+	lima_bo_unreserve(bo);
+	return err;
+}
+
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+	struct lima_device *dev = to_lima_dev(obj->dev);
+	struct lima_drm_priv *priv = to_lima_drm_priv(file);
+	struct lima_vm *vm = priv->vm;
+
+	LIST_HEAD(list);
+	struct ttm_validate_buffer tv_bo, tv_pd;
+	struct ww_acquire_ctx ticket;
+	int r;
+
+	tv_bo.bo = &bo->tbo;
+	tv_bo.num_shared = 1;
+	list_add(&tv_bo.head, &list);
+
+	tv_pd.bo = &vm->pd->tbo;
+	tv_pd.num_shared = 1;
+	list_add(&tv_pd.head, &list);
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+	if (r) {
+		dev_err(dev->dev, "leeking bo va because we "
+			"fail to reserve bo (%d)\n", r);
+		return;
+	}
+
+	lima_vm_bo_del(vm, bo);
+
+	ttm_eu_backoff_reservation(&ticket, &list);
+}
+
+int lima_gem_mmap_offset(struct drm_file *file, u32 handle, u64 *offset)
+{
+	struct drm_gem_object *obj;
+	struct lima_bo *bo;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = to_lima_bo(obj);
+	*offset = drm_vma_node_offset_addr(&bo->tbo.vma_node);
+
+	drm_gem_object_put_unlocked(obj);
+	return 0;
+}
+
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct lima_device *dev;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return -EINVAL;
+
+	file_priv = filp->private_data;
+	dev = file_priv->minor->dev->dev_private;
+	if (!dev)
+		return -EINVAL;
+
+	if (!lima_gem_prime_dma_buf_mmap(filp, vma))
+		return 0;
+
+	return ttm_bo_mmap(filp, vma, &dev->mman.bdev);
+}
+
+int lima_gem_va_map(struct drm_file *file, u32 handle, u32 flags, u32 va)
+{
+	struct lima_drm_priv *priv = to_lima_drm_priv(file);
+	struct lima_vm *vm = priv->vm;
+	struct drm_gem_object *obj;
+	struct lima_bo *bo;
+	struct lima_device *dev;
+	int err;
+
+	LIST_HEAD(list);
+	struct ttm_validate_buffer tv_bo, tv_pd;
+	struct ww_acquire_ctx ticket;
+
+	if (!PAGE_ALIGNED(va))
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = to_lima_bo(obj);
+	dev = to_lima_dev(obj->dev);
+
+	/* carefully handle overflow when calculating the mapped range */
+	if (va < dev->va_start || dev->va_end - obj->size < va) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	tv_bo.bo = &bo->tbo;
+	tv_bo.num_shared = 1;
+	list_add(&tv_bo.head, &list);
+
+	tv_pd.bo = &vm->pd->tbo;
+	tv_pd.num_shared = 1;
+	list_add(&tv_pd.head, &list);
+
+	err = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+	if (err)
+		goto out;
+
+	err = lima_vm_bo_map(vm, bo, va);
+
+	ttm_eu_backoff_reservation(&ticket, &list);
+out:
+	drm_gem_object_put_unlocked(obj);
+	return err;
+}
+
+int lima_gem_va_unmap(struct drm_file *file, u32 handle, u32 va)
+{
+	struct lima_drm_priv *priv = to_lima_drm_priv(file);
+	struct lima_vm *vm = priv->vm;
+	struct drm_gem_object *obj;
+	struct lima_bo *bo;
+	int err;
+
+	LIST_HEAD(list);
+	struct ttm_validate_buffer tv_bo, tv_pd;
+	struct ww_acquire_ctx ticket;
+
+	if (!PAGE_ALIGNED(va))
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = to_lima_bo(obj);
+
+	tv_bo.bo = &bo->tbo;
+	tv_bo.num_shared = 1;
+	list_add(&tv_bo.head, &list);
+
+	tv_pd.bo = &vm->pd->tbo;
+	tv_pd.num_shared = 1;
+	list_add(&tv_pd.head, &list);
+
+	err = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+	if (err)
+		goto out;
+
+	err = lima_vm_bo_unmap(vm, bo, va);
+
+	ttm_eu_backoff_reservation(&ticket, &list);
+out:
+	drm_gem_object_put_unlocked(obj);
+	return err;
+}
+
+static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
+			    bool write, bool explicit)
+{
+	int err = 0;
+
+	if (!write) {
+		err = reservation_object_reserve_shared(bo->tbo.resv, 1);
+		if (err)
+			return err;
+	}
+
+	/* explicit sync relies on the user-passed dep fences */
+	if (explicit)
+		return 0;
+
+	/* implicit sync takes the bo fences from the resv object */
+	if (write) {
+		unsigned int nr_fences;
+		struct dma_fence **fences;
+		int i;
+
+		err = reservation_object_get_fences_rcu(
+			bo->tbo.resv, NULL, &nr_fences, &fences);
+		if (err || !nr_fences)
+			return err;
+
+		for (i = 0; i < nr_fences; i++) {
+			err = lima_sched_task_add_dep(task, fences[i]);
+			if (err)
+				break;
+		}
+
+		/* on error, drop the references to the remaining fences */
+		for ( ; i < nr_fences; i++)
+			dma_fence_put(fences[i]);
+
+		kfree(fences);
+	} else {
+		struct dma_fence *fence;
+
+		fence = reservation_object_get_excl_rcu(bo->tbo.resv);
+		if (fence) {
+			err = lima_sched_task_add_dep(task, fence);
+			if (err)
+				dma_fence_put(fence);
+		}
+	}
+
+	return err;
+}
+
+static int lima_gem_add_deps(struct lima_ctx_mgr *mgr, struct lima_submit *submit)
+{
+	int i, err = 0;
+
+	for (i = 0; i < submit->nr_deps; i++) {
+		union drm_lima_gem_submit_dep *dep = submit->deps + i;
+		struct dma_fence *fence;
+
+		if (dep->type == LIMA_SUBMIT_DEP_FENCE) {
+			fence = lima_ctx_get_native_fence(
+				mgr, dep->fence.ctx, dep->fence.pipe,
+				dep->fence.seq);
+			if (IS_ERR(fence)) {
+				err = PTR_ERR(fence);
+				break;
+			}
+		} else if (dep->type == LIMA_SUBMIT_DEP_SYNC_FD) {
+			fence = sync_file_get_fence(dep->sync_fd.fd);
+			if (!fence) {
+				err = -EINVAL;
+				break;
+			}
+		} else {
+			err = -EINVAL;
+			break;
+		}
+
+		if (fence) {
+			err = lima_sched_task_add_dep(submit->task, fence);
+			if (err) {
+				dma_fence_put(fence);
+				break;
+			}
+		}
+	}
+
+	return err;
+}
+
+static int lima_gem_get_sync_fd(struct dma_fence *fence)
+{
+	struct sync_file *sync_file;
+	int fd;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		return fd;
+
+	sync_file = sync_file_create(fence);
+	if (!sync_file) {
+		put_unused_fd(fd);
+		return -ENOMEM;
+	}
+
+	fd_install(fd, sync_file->file);
+	return fd;
+}
+
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
+{
+	int i, err = 0;
+	struct lima_drm_priv *priv = to_lima_drm_priv(file);
+	struct lima_vm *vm = priv->vm;
+
+	INIT_LIST_HEAD(&submit->validated);
+	INIT_LIST_HEAD(&submit->duplicates);
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct drm_gem_object *obj;
+		struct drm_lima_gem_submit_bo *bo = submit->bos + i;
+		struct ttm_validate_buffer *vb = submit->vbs + i;
+
+		obj = drm_gem_object_lookup(file, bo->handle);
+		if (!obj) {
+			err = -ENOENT;
+			goto out0;
+		}
+
+		vb->bo = &to_lima_bo(obj)->tbo;
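+		/* writers take the exclusive fence slot, readers one shared */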
+		if (bo->flags & LIMA_SUBMIT_BO_WRITE)
+			vb->num_shared = 0;
+		else
+			vb->num_shared = 1;
+		list_add_tail(&vb->head, &submit->validated);
+	}
+
+	submit->vm_pd_vb.bo = &vm->pd->tbo;
+	submit->vm_pd_vb.num_shared = 1;
+	list_add(&submit->vm_pd_vb.head, &submit->validated);
+
+	err = ttm_eu_reserve_buffers(&submit->ticket, &submit->validated,
+				     true, &submit->duplicates);
+	if (err)
+		goto out0;
+
+	err = lima_sched_task_init(
+		submit->task, submit->ctx->context + submit->pipe, vm);
+	if (err)
+		goto out1;
+
+	err = lima_gem_add_deps(&priv->ctx_mgr, submit);
+	if (err)
+		goto out2;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct ttm_validate_buffer *vb = submit->vbs + i;
+		struct lima_bo *bo = ttm_to_lima_bo(vb->bo);
+
+		err = lima_gem_sync_bo(
+			submit->task, bo, vb->num_shared == 0,
+			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
+		if (err)
+			goto out2;
+	}
+
+	if (submit->flags & LIMA_SUBMIT_FLAG_SYNC_FD_OUT) {
+		int fd = lima_gem_get_sync_fd(
+			&submit->task->base.s_fence->finished);
+		if (fd < 0) {
+			err = fd;
+			goto out2;
+		}
+		submit->sync_fd = fd;
+	}
+
+	submit->fence = lima_sched_context_queue_task(
+		submit->ctx->context + submit->pipe, submit->task,
+		&submit->done);
+
+	ttm_eu_fence_buffer_objects(&submit->ticket, &submit->validated,
+				    &submit->task->base.s_fence->finished);
+
+out2:
+	if (err)
+		lima_sched_task_fini(submit->task);
+out1:
+	if (err)
+		ttm_eu_backoff_reservation(&submit->ticket, &submit->validated);
+out0:
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct ttm_validate_buffer *vb = submit->vbs + i;
+
+		if (!vb->bo)
+			break;
+		drm_gem_object_put_unlocked(&ttm_to_lima_bo(vb->bo)->gem);
+	}
+	return err;
+}
+
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns)
+{
+	bool write = op & LIMA_GEM_WAIT_WRITE;
+	struct drm_gem_object *obj;
+	struct lima_bo *bo;
+	signed long ret;
+	unsigned long timeout;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = to_lima_bo(obj);
+
+	timeout = timeout_ns ? lima_timeout_to_jiffies(timeout_ns) : 0;
+
+	ret = lima_bo_reserve(bo, true);
+	if (ret)
+		goto out;
+
+	/* the result must be checked as a long: on 64bit archs an
+	 * int would overflow for large timeouts and read as <0
+	 */
+	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, write, true, timeout);
+	if (ret == 0)
+		ret = timeout ? -ETIMEDOUT : -EBUSY;
+	else if (ret > 0)
+		ret = 0;
+
+	lima_bo_unreserve(bo);
+out:
+	drm_gem_object_put_unlocked(obj);
+	return ret;
+}
+
+int lima_gem_get_modifier(struct drm_file *file, u32 handle, u64 *modifier)
+{
+	struct drm_gem_object *obj;
+	struct lima_bo *bo;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = to_lima_bo(obj);
+	*modifier = bo->modifier;
+
+	drm_gem_object_put_unlocked(obj);
+	return 0;
+}
+
+int lima_gem_set_modifier(struct drm_file *file, u32 handle, u64 modifier)
+{
+	struct drm_gem_object *obj;
+	struct lima_bo *bo;
+
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	bo = to_lima_bo(obj);
+	bo->modifier = modifier;
+
+	drm_gem_object_put_unlocked(obj);
+	return 0;
+}
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
new file mode 100644
index 000000000000..da6968fab6bb
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -0,0 +1,25 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_H__
+#define __LIMA_GEM_H__
+
+struct lima_bo;
+struct lima_submit;
+
+struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+			   u32 size, u32 flags, u32 *handle);
+void lima_gem_free_object(struct drm_gem_object *obj);
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
+int lima_gem_mmap_offset(struct drm_file *file, u32 handle, u64 *offset);
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int lima_gem_va_map(struct drm_file *file, u32 handle, u32 flags, u32 va);
+int lima_gem_va_unmap(struct drm_file *file, u32 handle, u32 va);
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, u64 timeout_ns);
+int lima_gem_get_modifier(struct drm_file *file, u32 handle, u64 *modifier);
+int lima_gem_set_modifier(struct drm_file *file, u32 handle, u64 modifier);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
new file mode 100644
index 000000000000..b483b8bc2696
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.c
@@ -0,0 +1,144 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/dma-buf.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include "lima_device.h"
+#include "lima_object.h"
+#include "lima_gem_prime.h"
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+	struct drm_device *dev, struct dma_buf_attachment *attach,
+	struct sg_table *sgt)
+{
+	struct reservation_object *resv = attach->dmabuf->resv;
+	struct lima_device *ldev = to_lima_dev(dev);
+	struct lima_bo *bo;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	bo = lima_bo_create(ldev, attach->dmabuf->size, 0,
+			    ttm_bo_type_sg, sgt, resv);
+	if (IS_ERR(bo))
+		goto err_out;
+
+	ww_mutex_unlock(&resv->lock);
+	return &bo->gem;
+
+err_out:
+	ww_mutex_unlock(&resv->lock);
+	return ERR_CAST(bo);
+}
+
+struct reservation_object *lima_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+
+	return bo->tbo.resv;
+}
+
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+	int npages = bo->tbo.num_pages;
+
+	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+}
+
+int lima_gem_prime_dma_buf_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct lima_device *ldev = dev->dev_private;
+	struct ttm_bo_device *bdev = &ldev->mman.bdev;
+	struct drm_gem_object *obj = NULL;
+	struct drm_vma_offset_node *node;
+	int ret;
+
+	drm_vma_offset_lock_lookup(&bdev->vma_manager);
+	node = drm_vma_offset_exact_lookup_locked(&bdev->vma_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		struct ttm_buffer_object *tbo =
+			container_of(node, struct ttm_buffer_object, vma_node);
+		struct lima_bo *bo = container_of(tbo, struct lima_bo, tbo);
+		obj = &bo->gem;
+		/*
+		 * When the object is being freed, after it hits 0-refcnt it
+		 * proceeds to tear down the object. In the process it will
+		 * attempt to remove the VMA offset and so acquire this
+		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
+		 * that matches our range, we know it is in the process of being
+		 * destroyed and will be freed as soon as we release the lock -
+		 * so we have to check for the 0-refcnted object and treat it as
+		 * invalid.
+		 */
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+	if (!obj)
+		return -EINVAL;
+
+	/* only for buffers imported from another device */
+	if (!obj->import_attach) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+out:
+	drm_gem_object_put_unlocked(obj);
+	return ret;
+}
+
+void *lima_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+	int ret;
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->dma_buf_vmap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return bo->dma_buf_vmap.virtual;
+}
+
+void lima_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+
+	ttm_bo_kunmap(&bo->dma_buf_vmap);
+}
+
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+	struct lima_device *dev = ttm_to_lima_dev(bo->tbo.bdev);
+	int ret;
+
+	if (!vma->vm_file || !dev)
+		return -ENODEV;
+
+	/* Check for valid size. */
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	vma->vm_pgoff += drm_vma_node_offset_addr(&bo->tbo.vma_node) >> PAGE_SHIFT;
+
+	/* prime mmap needs no access check, so temporarily allow the node */
+	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_mmap(vma->vm_file, vma, &dev->mman.bdev);
+	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
new file mode 100644
index 000000000000..05ddb59b237a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.h
@@ -0,0 +1,18 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_PRIME_H__
+#define __LIMA_GEM_PRIME_H__
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+	struct drm_device *dev, struct dma_buf_attachment *attach,
+	struct sg_table *sgt);
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct reservation_object *lima_gem_prime_res_obj(struct drm_gem_object *obj);
+void *lima_gem_prime_vmap(struct drm_gem_object *obj);
+void lima_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
+int lima_gem_prime_dma_buf_mmap(struct file *filp, struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
new file mode 100644
index 000000000000..57eba224f74e
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -0,0 +1,280 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_regs.h"
+
+#define gp_write(reg, data) writel(data, ip->iomem + LIMA_GP_##reg)
+#define gp_read(reg) readl(ip->iomem + LIMA_GP_##reg)
+
+static irqreturn_t lima_gp_irq_handler(int irq, void *data)
+{
+	struct lima_ip *ip = data;
+	struct lima_device *dev = ip->dev;
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+	u32 state = gp_read(INT_STAT);
+	u32 status = gp_read(STATUS);
+	bool done = false;
+
+	/* for shared irq case */
+	if (!state)
+		return IRQ_NONE;
+
+	if (state & LIMA_GP_IRQ_MASK_ERROR) {
+		dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+			state, status);
+
+		/* mask all interrupts before hard reset */
+		gp_write(INT_MASK, 0);
+
+		pipe->error = true;
+		done = true;
+	} else {
+		bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
+				      LIMA_GP_IRQ_PLBU_END_CMD_LST);
+		bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
+					LIMA_GP_STATUS_PLBU_ACTIVE);
+		done = valid && !active;
+	}
+
+	gp_write(INT_CLEAR, state);
+
+	if (done)
+		lima_sched_pipe_task_done(pipe);
+
+	return IRQ_HANDLED;
+}
+
+static void lima_gp_soft_reset_async(struct lima_ip *ip)
+{
+	if (ip->data.async_reset)
+		return;
+
+	gp_write(INT_MASK, 0);
+	gp_write(INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
+	gp_write(CMD, LIMA_GP_CMD_SOFT_RESET);
+	ip->data.async_reset = true;
+}
+
+static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int timeout;
+
+	if (!ip->data.async_reset)
+		return 0;
+
+	for (timeout = 1000; timeout > 0; timeout--) {
+		if (gp_read(INT_RAWSTAT) & LIMA_GP_IRQ_RESET_COMPLETED)
+			break;
+	}
+	if (!timeout) {
+		dev_err(dev->dev, "gp soft reset time out\n");
+		return -ETIMEDOUT;
+	}
+
+	gp_write(INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+	gp_write(INT_MASK, LIMA_GP_IRQ_MASK_USED);
+
+	ip->data.async_reset = false;
+	return 0;
+}
+
+static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
+				 struct lima_sched_task *task)
+{
+	struct drm_lima_gp_frame *frame = task->frame;
+	u32 *f = frame->frame;
+
+	if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
+	    f[LIMA_GP_VSCL_END_ADDR >> 2] ||
+	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
+	    f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
+	    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
+	    f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
+		return -EINVAL;
+
+	if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
+	    f[LIMA_GP_VSCL_END_ADDR >> 2] &&
+	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
+	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+		return -EINVAL;
+
+	return 0;
+}
+
+static void lima_gp_task_run(struct lima_sched_pipe *pipe,
+			     struct lima_sched_task *task)
+{
+	struct lima_ip *ip = pipe->processor[0];
+	struct drm_lima_gp_frame *frame = task->frame;
+	u32 *f = frame->frame;
+	u32 cmd = 0;
+	int i;
+
+	if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
+	    f[LIMA_GP_VSCL_END_ADDR >> 2])
+		cmd |= LIMA_GP_CMD_START_VS;
+	if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
+	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+		cmd |= LIMA_GP_CMD_START_PLBU;
+
+	/* before any hw ops, wait for the async soft reset of the last successful task */
+	lima_gp_soft_reset_async_wait(ip);
+
+	for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
+		writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);
+
+	gp_write(CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+	gp_write(CMD, cmd);
+}
+
+static int lima_gp_hard_reset(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int timeout;
+
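+	/*
+	 * Detect reset completion by writing a magic value to
+	 * PERF_CNT_0_LIMIT and polling until it reads back; the write
+	 * seemingly only sticks once the core has left reset.
+	 */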
+	gp_write(PERF_CNT_0_LIMIT, 0xC0FFE000);
+	gp_write(INT_MASK, 0);
+	gp_write(CMD, LIMA_GP_CMD_RESET);
+	for (timeout = 1000; timeout > 0; timeout--) {
+		gp_write(PERF_CNT_0_LIMIT, 0xC01A0000);
+		if (gp_read(PERF_CNT_0_LIMIT) == 0xC01A0000)
+			break;
+	}
+	if (!timeout) {
+		dev_err(dev->dev, "gp hard reset timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	gp_write(PERF_CNT_0_LIMIT, 0);
+	gp_write(INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+	gp_write(INT_MASK, LIMA_GP_IRQ_MASK_USED);
+	return 0;
+}
+
+static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
+{
+	lima_gp_soft_reset_async(pipe->processor[0]);
+}
+
+static void lima_gp_task_error(struct lima_sched_pipe *pipe)
+{
+	struct lima_ip *ip = pipe->processor[0];
+
+	dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
+		gp_read(INT_STAT), gp_read(STATUS));
+
+	lima_gp_hard_reset(ip);
+}
+
+static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+	lima_sched_pipe_task_done(pipe);
+}
+
+static void lima_gp_print_version(struct lima_ip *ip)
+{
+	u32 version, major, minor;
+	char *name;
+
+	version = gp_read(VERSION);
+	major = (version >> 8) & 0xFF;
+	minor = version & 0xFF;
+	switch (version >> 16) {
+	case 0xA07:
+		name = "mali200";
+		break;
+	case 0xC07:
+		name = "mali300";
+		break;
+	case 0xB07:
+		name = "mali400";
+		break;
+	case 0xD07:
+		name = "mali450";
+		break;
+	default:
+		name = "unknow";
+		break;
+	}
+	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+		 lima_ip_name(ip), name, major, minor);
+}
+
+static struct kmem_cache *lima_gp_task_slab;
+static int lima_gp_task_slab_refcnt;
+
+int lima_gp_init(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int err;
+
+	lima_gp_print_version(ip);
+
+	ip->data.async_reset = false;
+	lima_gp_soft_reset_async(ip);
+	err = lima_gp_soft_reset_async_wait(ip);
+	if (err)
+		return err;
+
+	err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
+			       IRQF_SHARED, lima_ip_name(ip), ip);
+	if (err) {
+		dev_err(dev->dev, "gp %s fail to request irq\n",
+			lima_ip_name(ip));
+		return err;
+	}
+
+	return 0;
+}
+
+void lima_gp_fini(struct lima_ip *ip)
+{
+
+}
+
+int lima_gp_pipe_init(struct lima_device *dev)
+{
+	int frame_size = sizeof(struct drm_lima_gp_frame);
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
+	if (!lima_gp_task_slab) {
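+		/*
+		 * Whitelist only the trailing frame region for hardened
+		 * usercopy: it is what the submit ioctl copies from
+		 * userspace into the slab object.
+		 */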
+		lima_gp_task_slab = kmem_cache_create_usercopy(
+			"lima_gp_task", sizeof(struct lima_sched_task) + frame_size,
+			0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+			frame_size, NULL);
+		if (!lima_gp_task_slab)
+			return -ENOMEM;
+	}
+	lima_gp_task_slab_refcnt++;
+
+	pipe->frame_size = frame_size;
+	pipe->task_slab = lima_gp_task_slab;
+
+	pipe->task_validate = lima_gp_task_validate;
+	pipe->task_run = lima_gp_task_run;
+	pipe->task_fini = lima_gp_task_fini;
+	pipe->task_error = lima_gp_task_error;
+	pipe->task_mmu_error = lima_gp_task_mmu_error;
+
+	return 0;
+}
+
+void lima_gp_pipe_fini(struct lima_device *dev)
+{
+	if (!--lima_gp_task_slab_refcnt) {
+		kmem_cache_destroy(lima_gp_task_slab);
+		lima_gp_task_slab = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/lima/lima_gp.h b/drivers/gpu/drm/lima/lima_gp.h
new file mode 100644
index 000000000000..55bc48ec7603
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.h
@@ -0,0 +1,16 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GP_H__
+#define __LIMA_GP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_gp_init(struct lima_ip *ip);
+void lima_gp_fini(struct lima_ip *ip);
+
+int lima_gp_pipe_init(struct lima_device *dev);
+void lima_gp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
new file mode 100644
index 000000000000..e7cdec720e5d
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -0,0 +1,79 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_l2_cache.h"
+#include "lima_regs.h"
+
+#define l2_cache_write(reg, data) writel(data, ip->iomem + LIMA_L2_CACHE_##reg)
+#define l2_cache_read(reg) readl(ip->iomem + LIMA_L2_CACHE_##reg)
+
+static int lima_l2_cache_wait_idle(struct lima_ip *ip)
+{
+	int timeout;
+	struct lima_device *dev = ip->dev;
+
+	for (timeout = 100000; timeout > 0; timeout--) {
+		if (!(l2_cache_read(STATUS) & LIMA_L2_CACHE_STATUS_COMMAND_BUSY))
+			break;
+	}
+	if (!timeout) {
+		dev_err(dev->dev, "l2 cache wait command timeout\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+int lima_l2_cache_flush(struct lima_ip *ip)
+{
+	int ret;
+
+	spin_lock(&ip->data.lock);
+	l2_cache_write(COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
+	ret = lima_l2_cache_wait_idle(ip);
+	spin_unlock(&ip->data.lock);
+	return ret;
+}
+
+int lima_l2_cache_init(struct lima_ip *ip)
+{
+	int i, err;
+	u32 size;
+	struct lima_device *dev = ip->dev;
+
+	/* l2_cache2 only exists when one of PP4-7 is present */
+	if (ip->id == lima_ip_l2_cache2) {
+		for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+			if (dev->ip[i].present)
+				break;
+		}
+		if (i > lima_ip_pp7)
+			return -ENODEV;
+	}
+
+	spin_lock_init(&ip->data.lock);
+
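+	/* the SIZE register packs log2 encoded fields: total cache
+	 * size, associativity, cache line size and external bus width,
+	 * decoded by the shifts below */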
+	size = l2_cache_read(SIZE);
+	dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+		 1 << (((size >> 16) & 0xff) - 10),
+		 1 << ((size >> 8) & 0xff),
+		 1 << (size & 0xff),
+		 1 << ((size >> 24) & 0xff));
+
+	err = lima_l2_cache_flush(ip);
+	if (err)
+		return err;
+
+	l2_cache_write(ENABLE, LIMA_L2_CACHE_ENABLE_ACCESS | LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+	l2_cache_write(MAX_READS, 0x1c);
+
+	return 0;
+}
+
+void lima_l2_cache_fini(struct lima_ip *ip)
+{
+}
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
new file mode 100644
index 000000000000..2ff91eafefbe
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.h
@@ -0,0 +1,14 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_L2_CACHE_H__
+#define __LIMA_L2_CACHE_H__
+
+struct lima_ip;
+
+int lima_l2_cache_init(struct lima_ip *ip);
+void lima_l2_cache_fini(struct lima_ip *ip);
+
+int lima_l2_cache_flush(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
new file mode 100644
index 000000000000..234fb90a4285
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -0,0 +1,135 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_mmu.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+#define mmu_write(reg, data) writel(data, ip->iomem + LIMA_MMU_##reg)
+#define mmu_read(reg) readl(ip->iomem + LIMA_MMU_##reg)
+
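+/* write a command to the MMU command register and busy-wait until
+ * the given condition, typically a status register check, holds */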
+#define lima_mmu_send_command(command, condition)	     \
+({							     \
+	int __timeout, __ret = 0;			     \
+							     \
+	mmu_write(COMMAND, command);			     \
+	for (__timeout = 1000; __timeout > 0; __timeout--) { \
+		if (condition)				     \
+			break;				     \
+	}						     \
+	if (!__timeout) {				     \
+		dev_err(dev->dev, "mmu command %x timeout\n", command); \
+		__ret = -ETIMEDOUT;			     \
+	}						     \
+	__ret;						     \
+})
+
+static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
+{
+	struct lima_ip *ip = data;
+	struct lima_device *dev = ip->dev;
+	u32 status = mmu_read(INT_STATUS);
+	struct lima_sched_pipe *pipe;
+
+	/* for shared irq case */
+	if (!status)
+		return IRQ_NONE;
+
+	if (status & LIMA_MMU_INT_PAGE_FAULT) {
+		u32 fault = mmu_read(PAGE_FAULT_ADDR);
+
+		dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
+			fault, LIMA_MMU_STATUS_BUS_ID(status),
+			status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
+			lima_ip_name(ip));
+	}
+
+	if (status & LIMA_MMU_INT_READ_BUS_ERROR)
+		dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
+
+	/* mask all interrupts before resume */
+	mmu_write(INT_MASK, 0);
+	mmu_write(INT_CLEAR, status);
+
+	pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
+	lima_sched_pipe_mmu_error(pipe);
+
+	return IRQ_HANDLED;
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int err;
+
+	if (ip->id == lima_ip_ppmmu_bcast)
+		return 0;
+
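+	/* probe the MMU with a magic value: the DTE address register
+	 * keeps only the page-aligned bits, so the test value should
+	 * read back with its low bits cleared */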
+	mmu_write(DTE_ADDR, 0xCAFEBABE);
+	if (mmu_read(DTE_ADDR) != 0xCAFEB000) {
+		dev_err(dev->dev, "mmu %s dte write test failed\n", lima_ip_name(ip));
+		return -EIO;
+	}
+
+	err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET, mmu_read(DTE_ADDR) == 0);
+	if (err)
+		return err;
+
+	err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
+			       IRQF_SHARED, lima_ip_name(ip), ip);
+	if (err) {
+		dev_err(dev->dev, "mmu %s failed to request irq\n", lima_ip_name(ip));
+		return err;
+	}
+
+	mmu_write(INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+	mmu_write(DTE_ADDR, *lima_bo_get_pages(dev->empty_vm->pd));
+	return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+				     mmu_read(STATUS) & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+void lima_mmu_fini(struct lima_ip *ip)
+{
+}
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
+{
+	struct lima_device *dev = ip->dev;
+
+	lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL,
+			      mmu_read(STATUS) & LIMA_MMU_STATUS_STALL_ACTIVE);
+
+	if (vm)
+		mmu_write(DTE_ADDR, *lima_bo_get_pages(vm->pd));
+
+	/* flush the TLB */
+	mmu_write(COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+
+	lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL,
+			      !(mmu_read(STATUS) & LIMA_MMU_STATUS_STALL_ACTIVE));
+}
+
+void lima_mmu_page_fault_resume(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	u32 status = mmu_read(STATUS);
+
+	if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
+		dev_info(dev->dev, "mmu resume\n");
+
+		mmu_write(INT_MASK, 0);
+		mmu_write(DTE_ADDR, 0xCAFEBABE);
+		lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET, mmu_read(DTE_ADDR) == 0);
+		mmu_write(INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+		mmu_write(DTE_ADDR, *lima_bo_get_pages(dev->empty_vm->pd));
+		lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+				      mmu_read(STATUS) & LIMA_MMU_STATUS_PAGING_ENABLED);
+	}
+}
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
new file mode 100644
index 000000000000..ca173b60fc73
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -0,0 +1,16 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_MMU_H__
+#define __LIMA_MMU_H__
+
+struct lima_ip;
+struct lima_vm;
+
+int lima_mmu_init(struct lima_ip *ip);
+void lima_mmu_fini(struct lima_ip *ip);
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
+void lima_mmu_page_fault_resume(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
new file mode 100644
index 000000000000..34d9b4dc2df6
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.c
@@ -0,0 +1,103 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <drm/drm_prime.h>
+#include <drm/drm_fourcc.h>
+
+#include "lima_object.h"
+
+static void lima_bo_init_placement(struct lima_bo *bo)
+{
+	struct ttm_placement *placement = &bo->placement;
+	struct ttm_place *place = &bo->place;
+
+	place->fpfn = 0;
+	place->lpfn = 0;
+	place->flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_WC;
+
+	/* pin all bo for now */
+	place->flags |= TTM_PL_FLAG_NO_EVICT;
+
+	placement->num_placement = 1;
+	placement->placement = place;
+
+	placement->num_busy_placement = 1;
+	placement->busy_placement = place;
+}
+
+static void lima_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct lima_bo *bo = ttm_to_lima_bo(tbo);
+
+	if (bo->gem.import_attach)
+		drm_prime_gem_destroy(&bo->gem, bo->tbo.sg);
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u64 size,
+			       u32 flags, enum ttm_bo_type type,
+			       struct sg_table *sg,
+			       struct reservation_object *resv)
+{
+	struct lima_bo *bo;
+	struct ttm_mem_type_manager *man;
+	size_t acc_size;
+	int err;
+
+	size = PAGE_ALIGN(size);
+	man = dev->mman.bdev.man + TTM_PL_TT;
+	if (size >= (man->size << PAGE_SHIFT))
+		return ERR_PTR(-ENOMEM);
+
+	acc_size = ttm_bo_dma_acc_size(&dev->mman.bdev, size,
+				       sizeof(struct lima_bo));
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(dev->ddev, &bo->gem, size);
+
+	INIT_LIST_HEAD(&bo->va);
+
+	bo->tbo.bdev = &dev->mman.bdev;
+
+	lima_bo_init_placement(bo);
+
+	err = ttm_bo_init(&dev->mman.bdev, &bo->tbo, size, type,
+			  &bo->placement, 0, type != ttm_bo_type_kernel,
+			  acc_size, sg, resv, &lima_bo_destroy);
+	if (err)
+		goto err_out;
+
+	bo->modifier = DRM_FORMAT_MOD_INVALID;
+	return bo;
+
+err_out:
+	kfree(bo);
+	return ERR_PTR(err);
+}
+
+dma_addr_t *lima_bo_get_pages(struct lima_bo *bo)
+{
+	struct lima_ttm_tt *ttm = (void *)bo->tbo.ttm;
+
+	return ttm->ttm.dma_address;
+}
+
+void *lima_bo_kmap(struct lima_bo *bo)
+{
+	bool is_iomem;
+	void *ret;
+	int err;
+
+	ret = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ret)
+		return ret;
+
+	err = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (err)
+		return ERR_PTR(err);
+
+	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
new file mode 100644
index 000000000000..54ffcc416ed2
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.h
@@ -0,0 +1,72 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_OBJECT_H__
+#define __LIMA_OBJECT_H__
+
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo_api.h>
+
+#include "lima_device.h"
+
+struct lima_bo {
+	struct drm_gem_object gem;
+
+	struct ttm_place place;
+	struct ttm_placement placement;
+	struct ttm_buffer_object tbo;
+	struct ttm_bo_kmap_obj kmap;
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+
+	struct list_head va;
+
+	u64 modifier;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+	return container_of(obj, struct lima_bo, gem);
+}
+
+static inline struct lima_bo *
+ttm_to_lima_bo(struct ttm_buffer_object *tbo)
+{
+	return container_of(tbo, struct lima_bo, tbo);
+}
+
+static inline int lima_bo_reserve(struct lima_bo *bo, bool intr)
+{
+	struct lima_device *dev = ttm_to_lima_dev(bo->tbo.bdev);
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, intr, false, NULL);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(dev->dev, "%p reserve failed\n", bo);
+		return r;
+	}
+	return 0;
+}
+
+static inline void lima_bo_unreserve(struct lima_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u64 size,
+			       u32 flags, enum ttm_bo_type type,
+			       struct sg_table *sg,
+			       struct reservation_object *resv);
+
+static inline void lima_bo_unref(struct lima_bo *bo)
+{
+	struct ttm_buffer_object *tbo = &bo->tbo;
+
+	ttm_bo_unref(&tbo);
+}
+
+dma_addr_t *lima_bo_get_pages(struct lima_bo *bo);
+void *lima_bo_kmap(struct lima_bo *bo);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
new file mode 100644
index 000000000000..31a84b9a0a3a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -0,0 +1,61 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_pmu.h"
+#include "lima_regs.h"
+
+#define pmu_write(reg, data) writel(data, ip->iomem + LIMA_PMU_##reg)
+#define pmu_read(reg) readl(ip->iomem + LIMA_PMU_##reg)
+
+static int lima_pmu_wait_cmd(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	u32 stat, timeout;
+
+	for (timeout = 1000000; timeout > 0; timeout--) {
+		stat = pmu_read(INT_RAWSTAT);
+		if (stat & LIMA_PMU_INT_CMD_MASK)
+			break;
+	}
+
+	if (!timeout) {
+		dev_err(dev->dev, "timeout waiting for pmu cmd\n");
+		return -ETIMEDOUT;
+	}
+
+	pmu_write(INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+	return 0;
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+	int err;
+	u32 stat;
+
+	pmu_write(INT_MASK, 0);
+
+	/* if this value is too low, the GPU becomes unstable at high
+	 * GPU clock frequencies */
+	pmu_write(SW_DELAY, 0xffff);
+
+	/* status reg 1=off 0=on */
+	stat = pmu_read(STATUS);
+
+	/* power up all ip */
+	if (stat) {
+		pmu_write(POWER_UP, stat);
+		err = lima_pmu_wait_cmd(ip);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+}
diff --git a/drivers/gpu/drm/lima/lima_pmu.h b/drivers/gpu/drm/lima/lima_pmu.h
new file mode 100644
index 000000000000..1cf94a35bdf9
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.h
@@ -0,0 +1,12 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PMU_H__
+#define __LIMA_PMU_H__
+
+struct lima_ip;
+
+int lima_pmu_init(struct lima_ip *ip);
+void lima_pmu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
new file mode 100644
index 000000000000..f65c499d0005
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -0,0 +1,419 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_pp.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define pp_write(reg, data) writel(data, ip->iomem + LIMA_PP_##reg)
+#define pp_read(reg) readl(ip->iomem + LIMA_PP_##reg)
+
+static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
+{
+	struct lima_device *dev = ip->dev;
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+	if (state & LIMA_PP_IRQ_MASK_ERROR) {
+		u32 status = pp_read(STATUS);
+
+		dev_err(dev->dev, "pp error irq state=%x status=%x\n",
+			state, status);
+
+		pipe->error = true;
+
+		/* mask all interrupts before hard reset */
+		pp_write(INT_MASK, 0);
+	}
+
+	pp_write(INT_CLEAR, state);
+}
+
+static irqreturn_t lima_pp_irq_handler(int irq, void *data)
+{
+	struct lima_ip *ip = data;
+	struct lima_device *dev = ip->dev;
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+	u32 state = pp_read(INT_STATUS);
+
+	/* for shared irq case */
+	if (!state)
+		return IRQ_NONE;
+
+	lima_pp_handle_irq(ip, state);
+
+	if (atomic_dec_and_test(&pipe->task))
+		lima_sched_pipe_task_done(pipe);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
+{
+	int i;
+	irqreturn_t ret = IRQ_NONE;
+	struct lima_ip *pp_bcast = data;
+	struct lima_device *dev = pp_bcast->dev;
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+	struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+	for (i = 0; i < frame->num_pp; i++) {
+		struct lima_ip *ip = pipe->processor[i];
+		u32 status, state;
+
+		if (pipe->done & (1 << i))
+			continue;
+
+		/* read status first in case the interrupt state changes
+		 * in the middle, which would make us miss the interrupt */
+		status = pp_read(STATUS);
+		state = pp_read(INT_STATUS);
+
+		if (state) {
+			lima_pp_handle_irq(ip, state);
+			ret = IRQ_HANDLED;
+		} else if (status & LIMA_PP_STATUS_RENDERING_ACTIVE) {
+			continue;
+		}
+
+		pipe->done |= (1 << i);
+		if (atomic_dec_and_test(&pipe->task))
+			lima_sched_pipe_task_done(pipe);
+	}
+
+	return ret;
+}
+
+static void lima_pp_soft_reset_async(struct lima_ip *ip)
+{
+	if (ip->data.async_reset)
+		return;
+
+	pp_write(INT_MASK, 0);
+	pp_write(INT_RAWSTAT, LIMA_PP_IRQ_MASK_ALL);
+	pp_write(CTRL, LIMA_PP_CTRL_SOFT_RESET);
+	ip->data.async_reset = true;
+}
+
+static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int timeout;
+
+	for (timeout = 1000; timeout > 0; timeout--) {
+		if (!(pp_read(STATUS) & LIMA_PP_STATUS_RENDERING_ACTIVE) &&
+		    pp_read(INT_RAWSTAT) == LIMA_PP_IRQ_RESET_COMPLETED)
+			break;
+	}
+	if (!timeout) {
+		dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
+		return -ETIMEDOUT;
+	}
+
+	pp_write(INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+	pp_write(INT_MASK, LIMA_PP_IRQ_MASK_USED);
+	return 0;
+}
+
+static int lima_pp_soft_reset_async_wait(struct lima_ip *ip)
+{
+	int i, err = 0;
+
+	if (!ip->data.async_reset)
+		return 0;
+
+	if (ip->id == lima_ip_pp_bcast) {
+		struct lima_device *dev = ip->dev;
+		struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+		struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+		for (i = 0; i < frame->num_pp; i++)
+			err |= lima_pp_soft_reset_async_wait_one(pipe->processor[i]);
+	} else {
+		err = lima_pp_soft_reset_async_wait_one(ip);
+	}
+
+	ip->data.async_reset = false;
+	return err;
+}
+
+static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
+{
+	int i, j, n = 0;
+
+	for (i = 0; i < LIMA_PP_FRAME_REG_NUM; i++)
+		writel(frame[i], ip->iomem + LIMA_PP_FRAME + i * 4);
+
+	for (i = 0; i < 3; i++) {
+		for (j = 0; j < LIMA_PP_WB_REG_NUM; j++)
+			writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
+	}
+}
+
+static int lima_pp_hard_reset(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int timeout;
+
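+	/* there is no reset-done irq here, so use a perf counter limit
+	 * register as scratch: poll until a write to it sticks, which
+	 * indicates the core has left reset and accepts register
+	 * writes again */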
+	pp_write(PERF_CNT_0_LIMIT, 0xC0FFE000);
+	pp_write(INT_MASK, 0);
+	pp_write(CTRL, LIMA_PP_CTRL_FORCE_RESET);
+	for (timeout = 1000; timeout > 0; timeout--) {
+		pp_write(PERF_CNT_0_LIMIT, 0xC01A0000);
+		if (pp_read(PERF_CNT_0_LIMIT) == 0xC01A0000)
+			break;
+	}
+	if (!timeout) {
+		dev_err(dev->dev, "pp hard reset timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	pp_write(PERF_CNT_0_LIMIT, 0);
+	pp_write(INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+	pp_write(INT_MASK, LIMA_PP_IRQ_MASK_USED);
+	return 0;
+}
+
+static void lima_pp_print_version(struct lima_ip *ip)
+{
+	u32 version, major, minor;
+	char *name;
+
+	version = pp_read(VERSION);
+	major = (version >> 8) & 0xFF;
+	minor = version & 0xFF;
+	switch (version >> 16) {
+	case 0xC807:
+		name = "mali200";
+		break;
+	case 0xCE07:
+		name = "mali300";
+		break;
+	case 0xCD07:
+		name = "mali400";
+		break;
+	case 0xCF07:
+		name = "mali450";
+		break;
+	default:
+		name = "unknown";
+		break;
+	}
+	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+		 lima_ip_name(ip), name, major, minor);
+}
+
+int lima_pp_init(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int err;
+
+	lima_pp_print_version(ip);
+
+	ip->data.async_reset = false;
+	lima_pp_soft_reset_async(ip);
+	err = lima_pp_soft_reset_async_wait(ip);
+	if (err)
+		return err;
+
+	err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
+			       IRQF_SHARED, lima_ip_name(ip), ip);
+	if (err) {
+		dev_err(dev->dev, "pp %s failed to request irq\n",
+			lima_ip_name(ip));
+		return err;
+	}
+
+	return 0;
+}
+
+void lima_pp_fini(struct lima_ip *ip)
+{
+}
+
+int lima_pp_bcast_init(struct lima_ip *ip)
+{
+	struct lima_device *dev = ip->dev;
+	int err;
+
+	err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
+			       IRQF_SHARED, lima_ip_name(ip), ip);
+	if (err) {
+		dev_err(dev->dev, "pp %s failed to request irq\n",
+			lima_ip_name(ip));
+		return err;
+	}
+
+	return 0;
+}
+
+void lima_pp_bcast_fini(struct lima_ip *ip)
+{
+}
+
+static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+				 struct lima_sched_task *task)
+{
+	u32 num_pp;
+
+	if (pipe->bcast_processor) {
+		struct drm_lima_m450_pp_frame *f = task->frame;
+
+		num_pp = f->num_pp;
+	} else {
+		struct drm_lima_m400_pp_frame *f = task->frame;
+
+		num_pp = f->num_pp;
+	}
+
+	if (num_pp == 0 || num_pp > pipe->num_processor)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void lima_pp_task_run(struct lima_sched_pipe *pipe,
+			     struct lima_sched_task *task)
+{
+	if (pipe->bcast_processor) {
+		struct drm_lima_m450_pp_frame *frame = task->frame;
+		struct lima_device *dev = pipe->bcast_processor->dev;
+		struct lima_ip *ip = pipe->bcast_processor;
+		int i;
+
+		pipe->done = 0;
+		atomic_set(&pipe->task, frame->num_pp);
+
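+		/* with the DLBU all PPs fetch their tile lists through
+		 * the fixed reserved DLBU address, otherwise each PP is
+		 * given its own PLBU array address below */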
+		if (frame->use_dlbu) {
+			lima_dlbu_enable(dev, frame->num_pp);
+
+			frame->frame[LIMA_PP_FRAME >> 2] = LIMA_VA_RESERVE_DLBU;
+			lima_dlbu_set_reg(dev->ip + lima_ip_dlbu, frame->dlbu_regs);
+		} else {
+			lima_dlbu_disable(dev);
+		}
+
+		lima_bcast_enable(dev, frame->num_pp);
+
+		lima_pp_soft_reset_async_wait(ip);
+
+		lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+		for (i = 0; i < frame->num_pp; i++) {
+			struct lima_ip *ip = pipe->processor[i];
+
+			pp_write(STACK, frame->fragment_stack_address[i]);
+			if (!frame->use_dlbu)
+				pp_write(FRAME, frame->plbu_array_address[i]);
+		}
+
+		pp_write(CTRL, LIMA_PP_CTRL_START_RENDERING);
+	} else {
+		struct drm_lima_m400_pp_frame *frame = task->frame;
+		int i;
+
+		atomic_set(&pipe->task, frame->num_pp);
+
+		for (i = 0; i < frame->num_pp; i++) {
+			struct lima_ip *ip = pipe->processor[i];
+
+			frame->frame[LIMA_PP_FRAME >> 2] =
+				frame->plbu_array_address[i];
+			frame->frame[LIMA_PP_STACK >> 2] =
+				frame->fragment_stack_address[i];
+
+			lima_pp_soft_reset_async_wait(ip);
+
+			lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+			pp_write(CTRL, LIMA_PP_CTRL_START_RENDERING);
+		}
+	}
+}
+
+static void lima_pp_task_fini(struct lima_sched_pipe *pipe)
+{
+	if (pipe->bcast_processor) {
+		lima_pp_soft_reset_async(pipe->bcast_processor);
+	} else {
+		int i;
+
+		for (i = 0; i < pipe->num_processor; i++)
+			lima_pp_soft_reset_async(pipe->processor[i]);
+	}
+}
+
+static void lima_pp_task_error(struct lima_sched_pipe *pipe)
+{
+	int i;
+
+	for (i = 0; i < pipe->num_processor; i++) {
+		struct lima_ip *ip = pipe->processor[i];
+
+		dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
+			i, pp_read(INT_STATUS), pp_read(STATUS));
+
+		lima_pp_hard_reset(ip);
+	}
+}
+
+static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+	if (atomic_dec_and_test(&pipe->task))
+		lima_sched_pipe_task_done(pipe);
+}
+
+static struct kmem_cache *lima_pp_task_slab;
+static int lima_pp_task_slab_refcnt;
+
+int lima_pp_pipe_init(struct lima_device *dev)
+{
+	int frame_size;
+	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+	if (dev->id == lima_gpu_mali400)
+		frame_size = sizeof(struct drm_lima_m400_pp_frame);
+	else
+		frame_size = sizeof(struct drm_lima_m450_pp_frame);
+
+	if (!lima_pp_task_slab) {
+		lima_pp_task_slab = kmem_cache_create_usercopy(
+			"lima_pp_task", sizeof(struct lima_sched_task) + frame_size,
+			0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+			frame_size, NULL);
+		if (!lima_pp_task_slab)
+			return -ENOMEM;
+	}
+	lima_pp_task_slab_refcnt++;
+
+	pipe->frame_size = frame_size;
+	pipe->task_slab = lima_pp_task_slab;
+
+	pipe->task_validate = lima_pp_task_validate;
+	pipe->task_run = lima_pp_task_run;
+	pipe->task_fini = lima_pp_task_fini;
+	pipe->task_error = lima_pp_task_error;
+	pipe->task_mmu_error = lima_pp_task_mmu_error;
+
+	return 0;
+}
+
+void lima_pp_pipe_fini(struct lima_device *dev)
+{
+	if (!--lima_pp_task_slab_refcnt) {
+		kmem_cache_destroy(lima_pp_task_slab);
+		lima_pp_task_slab = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/lima/lima_pp.h b/drivers/gpu/drm/lima/lima_pp.h
new file mode 100644
index 000000000000..f83f8cb4d30a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.h
@@ -0,0 +1,19 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PP_H__
+#define __LIMA_PP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_pp_init(struct lima_ip *ip);
+void lima_pp_fini(struct lima_ip *ip);
+
+int lima_pp_bcast_init(struct lima_ip *ip);
+void lima_pp_bcast_fini(struct lima_ip *ip);
+
+int lima_pp_pipe_init(struct lima_device *dev);
+void lima_pp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
new file mode 100644
index 000000000000..d5ade8fc8901
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -0,0 +1,298 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2010-2017 ARM Limited. All rights reserved.
+ * Copyright 2017-2018 Qiang Yu <yuq825@gmail.com>
+ */
+
+#ifndef __LIMA_REGS_H__
+#define __LIMA_REGS_H__
+
+/* This file's register definition is collected from the
+ * official ARM Mali Utgard GPU kernel driver source code
+ */
+
+/* PMU regs */
+#define LIMA_PMU_POWER_UP                  0x00
+#define LIMA_PMU_POWER_DOWN                0x04
+#define   LIMA_PMU_POWER_GP0_MASK          BIT(0)
+#define   LIMA_PMU_POWER_L2_MASK           BIT(1)
+#define   LIMA_PMU_POWER_PP_MASK(i)        BIT(2 + (i))
+
+/*
+ * On Mali450 each block automatically starts up its corresponding L2
+ * and the PPs are not fully independent controllable.
+ * Instead PP0, PP1-3 and PP4-7 can be turned on or off.
+ */
+#define   LIMA450_PMU_POWER_PP0_MASK       BIT(1)
+#define   LIMA450_PMU_POWER_PP13_MASK      BIT(2)
+#define   LIMA450_PMU_POWER_PP47_MASK      BIT(3)
+
+#define LIMA_PMU_STATUS                    0x08
+#define LIMA_PMU_INT_MASK                  0x0C
+#define LIMA_PMU_INT_RAWSTAT               0x10
+#define LIMA_PMU_INT_CLEAR                 0x18
+#define   LIMA_PMU_INT_CMD_MASK            BIT(0)
+#define LIMA_PMU_SW_DELAY                  0x1C
+
+/* L2 cache regs */
+#define LIMA_L2_CACHE_SIZE                   0x0004
+#define LIMA_L2_CACHE_STATUS                 0x0008
+#define   LIMA_L2_CACHE_STATUS_COMMAND_BUSY  BIT(0)
+#define   LIMA_L2_CACHE_STATUS_DATA_BUSY     BIT(1)
+#define LIMA_L2_CACHE_COMMAND                0x0010
+#define   LIMA_L2_CACHE_COMMAND_CLEAR_ALL    BIT(0)
+#define LIMA_L2_CACHE_CLEAR_PAGE             0x0014
+#define LIMA_L2_CACHE_MAX_READS              0x0018
+#define LIMA_L2_CACHE_ENABLE                 0x001C
+#define   LIMA_L2_CACHE_ENABLE_ACCESS        BIT(0)
+#define   LIMA_L2_CACHE_ENABLE_READ_ALLOCATE BIT(1)
+#define LIMA_L2_CACHE_PERFCNT_SRC0           0x0020
+#define LIMA_L2_CACHE_PERFCNT_VAL0           0x0024
+#define LIMA_L2_CACHE_PERFCNT_SRC1           0x0028
+#define LIMA_L2_CACHE_PERFCNT_VAL1           0x002C
+
+/* GP regs */
+#define LIMA_GP_VSCL_START_ADDR                0x00
+#define LIMA_GP_VSCL_END_ADDR                  0x04
+#define LIMA_GP_PLBUCL_START_ADDR              0x08
+#define LIMA_GP_PLBUCL_END_ADDR                0x0c
+#define LIMA_GP_PLBU_ALLOC_START_ADDR          0x10
+#define LIMA_GP_PLBU_ALLOC_END_ADDR            0x14
+#define LIMA_GP_CMD                            0x20
+#define   LIMA_GP_CMD_START_VS                 BIT(0)
+#define   LIMA_GP_CMD_START_PLBU               BIT(1)
+#define   LIMA_GP_CMD_UPDATE_PLBU_ALLOC        BIT(4)
+#define   LIMA_GP_CMD_RESET                    BIT(5)
+#define   LIMA_GP_CMD_FORCE_HANG               BIT(6)
+#define   LIMA_GP_CMD_STOP_BUS                 BIT(9)
+#define   LIMA_GP_CMD_SOFT_RESET               BIT(10)
+#define LIMA_GP_INT_RAWSTAT                    0x24
+#define LIMA_GP_INT_CLEAR                      0x28
+#define LIMA_GP_INT_MASK                       0x2C
+#define LIMA_GP_INT_STAT                       0x30
+#define   LIMA_GP_IRQ_VS_END_CMD_LST           BIT(0)
+#define   LIMA_GP_IRQ_PLBU_END_CMD_LST         BIT(1)
+#define   LIMA_GP_IRQ_PLBU_OUT_OF_MEM          BIT(2)
+#define   LIMA_GP_IRQ_VS_SEM_IRQ               BIT(3)
+#define   LIMA_GP_IRQ_PLBU_SEM_IRQ             BIT(4)
+#define   LIMA_GP_IRQ_HANG                     BIT(5)
+#define   LIMA_GP_IRQ_FORCE_HANG               BIT(6)
+#define   LIMA_GP_IRQ_PERF_CNT_0_LIMIT         BIT(7)
+#define   LIMA_GP_IRQ_PERF_CNT_1_LIMIT         BIT(8)
+#define   LIMA_GP_IRQ_WRITE_BOUND_ERR          BIT(9)
+#define   LIMA_GP_IRQ_SYNC_ERROR               BIT(10)
+#define   LIMA_GP_IRQ_AXI_BUS_ERROR            BIT(11)
+#define   LIMA_GP_IRQ_AXI_BUS_STOPPED          BIT(12)
+#define   LIMA_GP_IRQ_VS_INVALID_CMD           BIT(13)
+#define   LIMA_GP_IRQ_PLB_INVALID_CMD          BIT(14)
+#define   LIMA_GP_IRQ_RESET_COMPLETED          BIT(19)
+#define   LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW      BIT(20)
+#define   LIMA_GP_IRQ_SEMAPHORE_OVERFLOW       BIT(21)
+#define   LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS  BIT(22)
+#define LIMA_GP_WRITE_BOUND_LOW                0x34
+#define LIMA_GP_PERF_CNT_0_ENABLE              0x3C
+#define LIMA_GP_PERF_CNT_1_ENABLE              0x40
+#define LIMA_GP_PERF_CNT_0_SRC                 0x44
+#define LIMA_GP_PERF_CNT_1_SRC                 0x48
+#define LIMA_GP_PERF_CNT_0_VALUE               0x4C
+#define LIMA_GP_PERF_CNT_1_VALUE               0x50
+#define LIMA_GP_PERF_CNT_0_LIMIT               0x54
+#define LIMA_GP_STATUS                         0x68
+#define   LIMA_GP_STATUS_VS_ACTIVE             BIT(1)
+#define   LIMA_GP_STATUS_BUS_STOPPED           BIT(2)
+#define   LIMA_GP_STATUS_PLBU_ACTIVE           BIT(3)
+#define   LIMA_GP_STATUS_BUS_ERROR             BIT(6)
+#define   LIMA_GP_STATUS_WRITE_BOUND_ERR       BIT(8)
+#define LIMA_GP_VERSION                        0x6C
+#define LIMA_GP_VSCL_START_ADDR_READ           0x80
+#define LIMA_GP_PLBCL_START_ADDR_READ          0x84
+#define LIMA_GP_CONTR_AXI_BUS_ERROR_STAT       0x94
+
+#define LIMA_GP_IRQ_MASK_ALL		   \
+	(				   \
+	 LIMA_GP_IRQ_VS_END_CMD_LST      | \
+	 LIMA_GP_IRQ_PLBU_END_CMD_LST    | \
+	 LIMA_GP_IRQ_PLBU_OUT_OF_MEM     | \
+	 LIMA_GP_IRQ_VS_SEM_IRQ          | \
+	 LIMA_GP_IRQ_PLBU_SEM_IRQ        | \
+	 LIMA_GP_IRQ_HANG                | \
+	 LIMA_GP_IRQ_FORCE_HANG          | \
+	 LIMA_GP_IRQ_PERF_CNT_0_LIMIT    | \
+	 LIMA_GP_IRQ_PERF_CNT_1_LIMIT    | \
+	 LIMA_GP_IRQ_WRITE_BOUND_ERR     | \
+	 LIMA_GP_IRQ_SYNC_ERROR          | \
+	 LIMA_GP_IRQ_AXI_BUS_ERROR       | \
+	 LIMA_GP_IRQ_AXI_BUS_STOPPED     | \
+	 LIMA_GP_IRQ_VS_INVALID_CMD      | \
+	 LIMA_GP_IRQ_PLB_INVALID_CMD     | \
+	 LIMA_GP_IRQ_RESET_COMPLETED     | \
+	 LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+	 LIMA_GP_IRQ_SEMAPHORE_OVERFLOW  | \
+	 LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_ERROR             \
+	(                                  \
+	 LIMA_GP_IRQ_PLBU_OUT_OF_MEM     | \
+	 LIMA_GP_IRQ_FORCE_HANG          | \
+	 LIMA_GP_IRQ_WRITE_BOUND_ERR     | \
+	 LIMA_GP_IRQ_SYNC_ERROR          | \
+	 LIMA_GP_IRQ_AXI_BUS_ERROR       | \
+	 LIMA_GP_IRQ_VS_INVALID_CMD      | \
+	 LIMA_GP_IRQ_PLB_INVALID_CMD     | \
+	 LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+	 LIMA_GP_IRQ_SEMAPHORE_OVERFLOW  | \
+	 LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_USED		   \
+	(				   \
+	 LIMA_GP_IRQ_VS_END_CMD_LST      | \
+	 LIMA_GP_IRQ_PLBU_END_CMD_LST    | \
+	 LIMA_GP_IRQ_MASK_ERROR)
+
+/* PP regs */
+#define LIMA_PP_FRAME                        0x0000
+#define LIMA_PP_RSW			     0x0004
+#define LIMA_PP_STACK			     0x0030
+#define LIMA_PP_STACK_SIZE		     0x0034
+#define LIMA_PP_ORIGIN_OFFSET_X	             0x0040
+#define LIMA_PP_WB(i)			     (0x0100 * ((i) + 1))
+#define   LIMA_PP_WB_SOURCE_SELECT           0x0000
+#define	  LIMA_PP_WB_SOURCE_ADDR             0x0004
+
+#define LIMA_PP_VERSION                      0x1000
+#define LIMA_PP_CURRENT_REND_LIST_ADDR       0x1004
+#define LIMA_PP_STATUS                       0x1008
+#define   LIMA_PP_STATUS_RENDERING_ACTIVE    BIT(0)
+#define   LIMA_PP_STATUS_BUS_STOPPED         BIT(4)
+#define LIMA_PP_CTRL                         0x100c
+#define   LIMA_PP_CTRL_STOP_BUS              BIT(0)
+#define   LIMA_PP_CTRL_FLUSH_CACHES          BIT(3)
+#define   LIMA_PP_CTRL_FORCE_RESET           BIT(5)
+#define   LIMA_PP_CTRL_START_RENDERING       BIT(6)
+#define   LIMA_PP_CTRL_SOFT_RESET            BIT(7)
+#define LIMA_PP_INT_RAWSTAT                  0x1020
+#define LIMA_PP_INT_CLEAR                    0x1024
+#define LIMA_PP_INT_MASK                     0x1028
+#define LIMA_PP_INT_STATUS                   0x102c
+#define   LIMA_PP_IRQ_END_OF_FRAME           BIT(0)
+#define   LIMA_PP_IRQ_END_OF_TILE            BIT(1)
+#define   LIMA_PP_IRQ_HANG                   BIT(2)
+#define   LIMA_PP_IRQ_FORCE_HANG             BIT(3)
+#define   LIMA_PP_IRQ_BUS_ERROR              BIT(4)
+#define   LIMA_PP_IRQ_BUS_STOP               BIT(5)
+#define   LIMA_PP_IRQ_CNT_0_LIMIT            BIT(6)
+#define   LIMA_PP_IRQ_CNT_1_LIMIT            BIT(7)
+#define   LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR   BIT(8)
+#define   LIMA_PP_IRQ_INVALID_PLIST_COMMAND  BIT(9)
+#define   LIMA_PP_IRQ_CALL_STACK_UNDERFLOW   BIT(10)
+#define   LIMA_PP_IRQ_CALL_STACK_OVERFLOW    BIT(11)
+#define   LIMA_PP_IRQ_RESET_COMPLETED        BIT(12)
+#define LIMA_PP_WRITE_BOUNDARY_LOW           0x1044
+#define LIMA_PP_BUS_ERROR_STATUS             0x1050
+#define LIMA_PP_PERF_CNT_0_ENABLE            0x1080
+#define LIMA_PP_PERF_CNT_0_SRC               0x1084
+#define LIMA_PP_PERF_CNT_0_LIMIT             0x1088
+#define LIMA_PP_PERF_CNT_0_VALUE             0x108c
+#define LIMA_PP_PERF_CNT_1_ENABLE            0x10a0
+#define LIMA_PP_PERF_CNT_1_SRC               0x10a4
+#define LIMA_PP_PERF_CNT_1_LIMIT             0x10a8
+#define LIMA_PP_PERF_CNT_1_VALUE             0x10ac
+#define LIMA_PP_PERFMON_CONTR                0x10b0
+#define LIMA_PP_PERFMON_BASE                 0x10b4
+
+#define LIMA_PP_IRQ_MASK_ALL                 \
+	(                                    \
+	 LIMA_PP_IRQ_END_OF_FRAME          | \
+	 LIMA_PP_IRQ_END_OF_TILE           | \
+	 LIMA_PP_IRQ_HANG                  | \
+	 LIMA_PP_IRQ_FORCE_HANG            | \
+	 LIMA_PP_IRQ_BUS_ERROR             | \
+	 LIMA_PP_IRQ_BUS_STOP              | \
+	 LIMA_PP_IRQ_CNT_0_LIMIT           | \
+	 LIMA_PP_IRQ_CNT_1_LIMIT           | \
+	 LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR  | \
+	 LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+	 LIMA_PP_IRQ_CALL_STACK_UNDERFLOW  | \
+	 LIMA_PP_IRQ_CALL_STACK_OVERFLOW   | \
+	 LIMA_PP_IRQ_RESET_COMPLETED)
+
+#define LIMA_PP_IRQ_MASK_ERROR               \
+	(                                    \
+	 LIMA_PP_IRQ_FORCE_HANG            | \
+	 LIMA_PP_IRQ_BUS_ERROR             | \
+	 LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR  | \
+	 LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+	 LIMA_PP_IRQ_CALL_STACK_UNDERFLOW  | \
+	 LIMA_PP_IRQ_CALL_STACK_OVERFLOW)
+
+#define LIMA_PP_IRQ_MASK_USED                \
+	(                                    \
+	 LIMA_PP_IRQ_END_OF_FRAME          | \
+	 LIMA_PP_IRQ_MASK_ERROR)
+
+/* MMU regs */
+#define LIMA_MMU_DTE_ADDR                     0x0000
+#define LIMA_MMU_STATUS                       0x0004
+#define   LIMA_MMU_STATUS_PAGING_ENABLED      BIT(0)
+#define   LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE   BIT(1)
+#define   LIMA_MMU_STATUS_STALL_ACTIVE        BIT(2)
+#define   LIMA_MMU_STATUS_IDLE                BIT(3)
+#define   LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define   LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define   LIMA_MMU_STATUS_BUS_ID(x)           (((x) >> 6) & 0x1F)
+#define LIMA_MMU_COMMAND                      0x0008
+#define   LIMA_MMU_COMMAND_ENABLE_PAGING      0x00
+#define   LIMA_MMU_COMMAND_DISABLE_PAGING     0x01
+#define   LIMA_MMU_COMMAND_ENABLE_STALL       0x02
+#define   LIMA_MMU_COMMAND_DISABLE_STALL      0x03
+#define   LIMA_MMU_COMMAND_ZAP_CACHE          0x04
+#define   LIMA_MMU_COMMAND_PAGE_FAULT_DONE    0x05
+#define   LIMA_MMU_COMMAND_HARD_RESET         0x06
+#define LIMA_MMU_PAGE_FAULT_ADDR              0x000C
+#define LIMA_MMU_ZAP_ONE_LINE                 0x0010
+#define LIMA_MMU_INT_RAWSTAT                  0x0014
+#define LIMA_MMU_INT_CLEAR                    0x0018
+#define LIMA_MMU_INT_MASK                     0x001C
+#define   LIMA_MMU_INT_PAGE_FAULT             BIT(0)
+#define   LIMA_MMU_INT_READ_BUS_ERROR         BIT(1)
+#define LIMA_MMU_INT_STATUS                   0x0020
+
+#define LIMA_VM_FLAG_PRESENT          BIT(0)
+#define LIMA_VM_FLAG_READ_PERMISSION  BIT(1)
+#define LIMA_VM_FLAG_WRITE_PERMISSION BIT(2)
+#define LIMA_VM_FLAG_OVERRIDE_CACHE   BIT(3)
+#define LIMA_VM_FLAG_WRITE_CACHEABLE  BIT(4)
+#define LIMA_VM_FLAG_WRITE_ALLOCATE   BIT(5)
+#define LIMA_VM_FLAG_WRITE_BUFFERABLE BIT(6)
+#define LIMA_VM_FLAG_READ_CACHEABLE   BIT(7)
+#define LIMA_VM_FLAG_READ_ALLOCATE    BIT(8)
+#define LIMA_VM_FLAG_MASK             0x1FF
+
+#define LIMA_VM_FLAGS_CACHE (			 \
+		LIMA_VM_FLAG_PRESENT |		 \
+		LIMA_VM_FLAG_READ_PERMISSION |	 \
+		LIMA_VM_FLAG_WRITE_PERMISSION |	 \
+		LIMA_VM_FLAG_OVERRIDE_CACHE |	 \
+		LIMA_VM_FLAG_WRITE_CACHEABLE |	 \
+		LIMA_VM_FLAG_WRITE_BUFFERABLE |	 \
+		LIMA_VM_FLAG_READ_CACHEABLE |	 \
+		LIMA_VM_FLAG_READ_ALLOCATE )
+
+#define LIMA_VM_FLAGS_UNCACHE (			\
+		LIMA_VM_FLAG_PRESENT |		\
+		LIMA_VM_FLAG_READ_PERMISSION |	\
+		LIMA_VM_FLAG_WRITE_PERMISSION )
+
+/* DLBU regs */
+#define LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR  0x0000
+#define	LIMA_DLBU_MASTER_TLLIST_VADDR      0x0004
+#define	LIMA_DLBU_TLLIST_VBASEADDR         0x0008
+#define	LIMA_DLBU_FB_DIM                   0x000C
+#define	LIMA_DLBU_TLLIST_CONF              0x0010
+#define	LIMA_DLBU_START_TILE_POS           0x0014
+#define	LIMA_DLBU_PP_ENABLE_MASK           0x0018
+
+/* BCAST regs */
+#define LIMA_BCAST_BROADCAST_MASK    0x0
+#define LIMA_BCAST_INTERRUPT_MASK    0x4
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
new file mode 100644
index 000000000000..0c6b7c3b0cd4
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -0,0 +1,486 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#include "lima_drv.h"
+#include "lima_sched.h"
+#include "lima_vm.h"
+#include "lima_mmu.h"
+#include "lima_l2_cache.h"
+
+struct lima_fence {
+	struct dma_fence base;
+	struct lima_sched_pipe *pipe;
+};
+
+static struct kmem_cache *lima_fence_slab;
+
+int lima_sched_slab_init(void)
+{
+	lima_fence_slab = kmem_cache_create(
+		"lima_fence", sizeof(struct lima_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!lima_fence_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void lima_sched_slab_fini(void)
+{
+	kmem_cache_destroy(lima_fence_slab);
+}
+
+static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
+{
+	return container_of(fence, struct lima_fence, base);
+}
+
+static const char *lima_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "lima";
+}
+
+static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct lima_fence *f = to_lima_fence(fence);
+
+	return f->pipe->base.name;
+}
+
+static bool lima_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+static void lima_fence_release_rcu(struct rcu_head *rcu)
+{
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+	struct lima_fence *fence = to_lima_fence(f);
+
+	kmem_cache_free(lima_fence_slab, fence);
+}
+
+static void lima_fence_release(struct dma_fence *fence)
+{
+	struct lima_fence *f = to_lima_fence(fence);
+
+	call_rcu(&f->base.rcu, lima_fence_release_rcu);
+}
+
+static const struct dma_fence_ops lima_fence_ops = {
+	.get_driver_name = lima_fence_get_driver_name,
+	.get_timeline_name = lima_fence_get_timeline_name,
+	.enable_signaling = lima_fence_enable_signaling,
+	.wait = dma_fence_default_wait,
+	.release = lima_fence_release,
+};
+
+static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
+{
+	struct lima_fence *fence;
+
+	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	fence->pipe = pipe;
+	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
+		       pipe->fence_context, ++pipe->fence_seqno);
+
+	return fence;
+}
+
+static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
+{
+	return container_of(job, struct lima_sched_task, base);
+}
+
+static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
+{
+	return container_of(sched, struct lima_sched_pipe, base);
+}
+
+int lima_sched_task_init(struct lima_sched_task *task,
+			 struct lima_sched_context *context,
+			 struct lima_vm *vm)
+{
+	int err;
+
+	err = drm_sched_job_init(&task->base, &context->base, vm);
+	if (err)
+		return err;
+
+	task->vm = lima_vm_get(vm);
+	return 0;
+}
+
+void lima_sched_task_fini(struct lima_sched_task *task)
+{
+	dma_fence_put(&task->base.s_fence->finished);
+}
+
+int lima_sched_task_add_dep(struct lima_sched_task *task, struct dma_fence *fence)
+{
+	int i, new_dep = 4;
+
+	/* a fence from the same context is definitely earlier than this task */
+	if (fence->context == task->base.s_fence->finished.context) {
+		dma_fence_put(fence);
+		return 0;
+	}
+
+	if (task->dep && task->num_dep == task->max_dep)
+		new_dep = task->max_dep * 2;
+
+	if (task->max_dep < new_dep) {
+		void *dep = krealloc(task->dep, sizeof(*task->dep) * new_dep,
+				     GFP_KERNEL);
+
+		if (!dep)
+			return -ENOMEM;
+		task->max_dep = new_dep;
+		task->dep = dep;
+	}
+
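+	/* fences within one context signal in order, so only the
+	 * latest fence per context needs to be kept; a later fence
+	 * implies the earlier ones are signaled */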
+	for (i = 0; i < task->num_dep; i++) {
+		if (task->dep[i]->context == fence->context &&
+		    dma_fence_is_later(fence, task->dep[i])) {
+			dma_fence_put(task->dep[i]);
+			task->dep[i] = fence;
+			return 0;
+		}
+	}
+
+	task->dep[task->num_dep++] = fence;
+	return 0;
+}
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+			    struct lima_sched_context *context,
+			    atomic_t *guilty)
+{
+	struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+	int err;
+
+	context->fences = kcalloc(lima_sched_max_tasks,
+				  sizeof(*context->fences), GFP_KERNEL);
+	if (!context->fences)
+		return -ENOMEM;
+
+	mutex_init(&context->lock);
+	err = drm_sched_entity_init(&context->base, &rq, 1, guilty);
+	if (err) {
+		kfree(context->fences);
+		context->fences = NULL;
+		return err;
+	}
+
+	return 0;
+}
+
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+			     struct lima_sched_context *context)
+{
+	drm_sched_entity_fini(&context->base);
+
+	mutex_destroy(&context->lock);
+
+	if (context->fences) {
+		int i;
+
+		for (i = 0; i < lima_sched_max_tasks; i++)
+			dma_fence_put(context->fences[i]);
+		kfree(context->fences);
+	}
+}
+
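+/* each context keeps a fixed-size ring of in-flight fences indexed by
+ * a sequence number; this assumes lima_sched_max_tasks is a power of
+ * two so that masking with (lima_sched_max_tasks - 1) wraps correctly */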
+static uint32_t lima_sched_context_add_fence(struct lima_sched_context *context,
+					     struct dma_fence *fence,
+					     uint32_t *done)
+{
+	uint32_t seq, idx, i;
+	struct dma_fence *other;
+
+	mutex_lock(&context->lock);
+
+	seq = context->sequence;
+	idx = seq & (lima_sched_max_tasks - 1);
+	other = context->fences[idx];
+
+	if (other) {
+		int err = dma_fence_wait(other, false);
+
+		if (err)
+			DRM_ERROR("Error %d waiting context fence\n", err);
+	}
+
+	context->fences[idx] = dma_fence_get(fence);
+	context->sequence++;
+
+	/* get finished fence offset from seq */
+	for (i = 1; i < lima_sched_max_tasks; i++) {
+		idx = (seq - i) & (lima_sched_max_tasks - 1);
+		if (!context->fences[idx] ||
+		    dma_fence_is_signaled(context->fences[idx]))
+			break;
+	}
+
+	mutex_unlock(&context->lock);
+
+	dma_fence_put(other);
+
+	*done = i;
+	return seq;
+}
+
+struct dma_fence *lima_sched_context_get_fence(
+	struct lima_sched_context *context, uint32_t seq)
+{
+	struct dma_fence *fence;
+	int idx;
+	uint32_t max, min;
+
+	mutex_lock(&context->lock);
+
+	max = context->sequence - 1;
+	min = context->sequence - lima_sched_max_tasks;
+
+	/* handle overflow case */
+	if ((min < max && (seq < min || seq > max)) ||
+	    (min > max && (seq < min && seq > max))) {
+		fence = NULL;
+		goto out;
+	}
+
+	idx = seq & (lima_sched_max_tasks - 1);
+	fence = dma_fence_get(context->fences[idx]);
+
+out:
+	mutex_unlock(&context->lock);
+
+	return fence;
+}
+
+uint32_t lima_sched_context_queue_task(struct lima_sched_context *context,
+				       struct lima_sched_task *task,
+				       uint32_t *done)
+{
+	uint32_t seq = lima_sched_context_add_fence(
+		context, &task->base.s_fence->finished, done);
+	drm_sched_entity_push_job(&task->base, &context->base);
+	return seq;
+}
+
+static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
+					       struct drm_sched_entity *entity)
+{
+	struct lima_sched_task *task = to_lima_task(job);
+	int i;
+
+	for (i = 0; i < task->num_dep; i++) {
+		struct dma_fence *fence = task->dep[i];
+
+		if (!task->dep[i])
+			continue;
+
+		task->dep[i] = NULL;
+
+		if (!dma_fence_is_signaled(fence))
+			return fence;
+
+		dma_fence_put(fence);
+	}
+
+	return NULL;
+}
+
+static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
+{
+	struct lima_sched_task *task = to_lima_task(job);
+	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+	struct lima_fence *fence;
+	struct dma_fence *ret;
+	struct lima_vm *vm = NULL, *last_vm = NULL;
+	int i;
+
+	/* after GPU reset */
+	if (job->s_fence->finished.error < 0)
+		return NULL;
+
+	fence = lima_fence_create(pipe);
+	if (!fence)
+		return NULL;
+	task->fence = &fence->base;
+
+	/* take an extra reference for the caller, otherwise the irq
+	 * handler may put the fence before the caller can use it */
+	ret = dma_fence_get(task->fence);
+
+	pipe->current_task = task;
+
+	/* this is needed for MMU to work correctly, otherwise GP/PP
+	 * will hang or page fault for unknown reasons after running
+	 * for a while.
+	 *
+	 * Need to investigate:
+	 * 1. is it related to TLB
+	 * 2. how much performance will be affected by L2 cache flush
+	 * 3. can we reduce the calls to this function given that all
+	 *    GP/PP share the same L2 cache on mali400
+	 *
+	 * TODO:
+	 * 1. move this to task fini to save some wait time?
+	 * 2. when GP/PP use different L2 caches, does PP need to
+	 *    wait for the GP L2 cache flush?
+	 */
+	for (i = 0; i < pipe->num_l2_cache; i++)
+		lima_l2_cache_flush(pipe->l2_cache[i]);
+
+	if (task->vm != pipe->current_vm) {
+		vm = lima_vm_get(task->vm);
+		last_vm = pipe->current_vm;
+		pipe->current_vm = task->vm;
+	}
+
+	if (pipe->bcast_mmu) {
+		lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+	} else {
+		for (i = 0; i < pipe->num_mmu; i++)
+			lima_mmu_switch_vm(pipe->mmu[i], vm);
+	}
+
+	if (last_vm)
+		lima_vm_put(last_vm);
+
+	pipe->error = false;
+	pipe->task_run(pipe, task);
+
+	return task->fence;
+}
+
+static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
+					 struct lima_sched_task *task)
+{
+	kthread_park(pipe->base.thread);
+	drm_sched_hw_job_reset(&pipe->base, &task->base);
+
+	pipe->task_error(pipe);
+
+	if (pipe->bcast_mmu) {
+		lima_mmu_page_fault_resume(pipe->bcast_mmu);
+	} else {
+		int i;
+
+		for (i = 0; i < pipe->num_mmu; i++)
+			lima_mmu_page_fault_resume(pipe->mmu[i]);
+	}
+
+	if (pipe->current_vm)
+		lima_vm_put(pipe->current_vm);
+
+	pipe->current_vm = NULL;
+	pipe->current_task = NULL;
+
+	drm_sched_job_recovery(&pipe->base);
+	kthread_unpark(pipe->base.thread);
+}
+
+static void lima_sched_timedout_job(struct drm_sched_job *job)
+{
+	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+	struct lima_sched_task *task = to_lima_task(job);
+
+	DRM_ERROR("lima job timeout\n");
+
+	lima_sched_handle_error_task(pipe, task);
+}
+
+static void lima_sched_free_job(struct drm_sched_job *job)
+{
+	struct lima_sched_task *task = to_lima_task(job);
+	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+	int i;
+
+	dma_fence_put(task->fence);
+
+	for (i = 0; i < task->num_dep; i++) {
+		if (task->dep[i])
+			dma_fence_put(task->dep[i]);
+	}
+
+	kfree(task->dep);
+
+	lima_vm_put(task->vm);
+	kmem_cache_free(pipe->task_slab, task);
+}
+
+const struct drm_sched_backend_ops lima_sched_ops = {
+	.dependency = lima_sched_dependency,
+	.run_job = lima_sched_run_job,
+	.timedout_job = lima_sched_timedout_job,
+	.free_job = lima_sched_free_job,
+};
+
+static void lima_sched_error_work(struct work_struct *work)
+{
+	struct lima_sched_pipe *pipe =
+		container_of(work, struct lima_sched_pipe, error_work);
+	struct lima_sched_task *task = pipe->current_task;
+
+	lima_sched_handle_error_task(pipe, task);
+}
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
+{
+	long timeout;
+
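+	/* a non-positive lima_sched_timeout_ms disables the job
+	 * timeout entirely */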
+	if (lima_sched_timeout_ms <= 0)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(lima_sched_timeout_ms);
+
+	pipe->fence_context = dma_fence_context_alloc(1);
+	spin_lock_init(&pipe->fence_lock);
+
+	INIT_WORK(&pipe->error_work, lima_sched_error_work);
+
+	return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, timeout, name);
+}
+
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
+{
+	drm_sched_fini(&pipe->base);
+}
+
+unsigned long lima_timeout_to_jiffies(u64 timeout_ns)
+{
+	unsigned long timeout_jiffies;
+	ktime_t timeout;
+
+	/* a timeout_ns too large to fit in s64 means an infinite wait */
+	if (((s64)timeout_ns) < 0)
+		return MAX_SCHEDULE_TIMEOUT;
+
+	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
+	if (ktime_to_ns(timeout) < 0)
+		return 0;
+
+	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
+	/* clamp timeout to avoid unsigned -> signed overflow */
+	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
+		return MAX_SCHEDULE_TIMEOUT;
+
+	return timeout_jiffies;
+}
+
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
+{
+	if (pipe->error) {
+		schedule_work(&pipe->error_work);
+	} else {
+		struct lima_sched_task *task = pipe->current_task;
+
+		pipe->task_fini(pipe);
+		dma_fence_signal(task->fence);
+	}
+}
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
new file mode 100644
index 000000000000..e1393767128f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -0,0 +1,108 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_SCHED_H__
+#define __LIMA_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct lima_vm;
+
+struct lima_sched_task {
+	struct drm_sched_job base;
+
+	struct lima_vm *vm;
+	void *frame;
+
+	struct dma_fence **dep;
+	int num_dep;
+	int max_dep;
+
+	/* pipe fence */
+	struct dma_fence *fence;
+};
+
+struct lima_sched_context {
+	struct drm_sched_entity base;
+	struct mutex lock;
+	struct dma_fence **fences;
+	uint32_t sequence;
+};
+
+#define LIMA_SCHED_PIPE_MAX_MMU       8
+#define LIMA_SCHED_PIPE_MAX_L2_CACHE  2
+#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8
+
+struct lima_ip;
+
+struct lima_sched_pipe {
+	struct drm_gpu_scheduler base;
+
+	u64 fence_context;
+	u32 fence_seqno;
+	spinlock_t fence_lock;
+
+	struct lima_sched_task *current_task;
+	struct lima_vm *current_vm;
+
+	struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
+	int num_mmu;
+
+	struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
+	int num_l2_cache;
+
+	struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
+	int num_processor;
+
+	struct lima_ip *bcast_processor;
+	struct lima_ip *bcast_mmu;
+
+	u32 done;
+	bool error;
+	atomic_t task;
+
+	int frame_size;
+	struct kmem_cache *task_slab;
+
+	int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+	void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+	void (*task_fini)(struct lima_sched_pipe *pipe);
+	void (*task_error)(struct lima_sched_pipe *pipe);
+	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+
+	struct work_struct error_work;
+};
+
+int lima_sched_task_init(struct lima_sched_task *task,
+			 struct lima_sched_context *context,
+			 struct lima_vm *vm);
+void lima_sched_task_fini(struct lima_sched_task *task);
+int lima_sched_task_add_dep(struct lima_sched_task *task, struct dma_fence *fence);
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+			    struct lima_sched_context *context,
+			    atomic_t *guilty);
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+			     struct lima_sched_context *context);
+uint32_t lima_sched_context_queue_task(struct lima_sched_context *context,
+				       struct lima_sched_task *task,
+				       uint32_t *done);
+struct dma_fence *lima_sched_context_get_fence(
+	struct lima_sched_context *context, uint32_t seq);
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);
+
+static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
+{
+	pipe->error = true;
+	pipe->task_mmu_error(pipe);
+}
+
+int lima_sched_slab_init(void);
+void lima_sched_slab_fini(void);
+
+unsigned long lima_timeout_to_jiffies(u64 timeout_ns);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_ttm.c b/drivers/gpu/drm/lima/lima_ttm.c
new file mode 100644
index 000000000000..3c1904e33ff5
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ttm.c
@@ -0,0 +1,319 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/mm.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+#include "lima_drv.h"
+#include "lima_device.h"
+#include "lima_object.h"
+
+struct lima_tt_mgr {
+	spinlock_t lock;
+	unsigned long available;
+};
+
+static int lima_ttm_bo_man_init(struct ttm_mem_type_manager *man,
+				unsigned long p_size)
+{
+	struct lima_tt_mgr *mgr;
+
+	mgr = kmalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	spin_lock_init(&mgr->lock);
+	mgr->available = p_size;
+	man->priv = mgr;
+	return 0;
+}
+
+static int lima_ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct lima_tt_mgr *mgr = man->priv;
+
+	kfree(mgr);
+	man->priv = NULL;
+	return 0;
+}
+
+static int lima_ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+				    struct ttm_buffer_object *bo,
+				    const struct ttm_place *place,
+				    struct ttm_mem_reg *mem)
+{
+	struct lima_tt_mgr *mgr = man->priv;
+
+	/* don't exceed the mem limit */
+	spin_lock(&mgr->lock);
+	if (mgr->available < mem->num_pages) {
+		spin_unlock(&mgr->lock);
+		return 0;
+	}
+	mgr->available -= mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	/* just fake a non-null pointer to tell caller success */
+	mem->mm_node = (void *)1;
+	return 0;
+}
+
+static void lima_ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				     struct ttm_mem_reg *mem)
+{
+	struct lima_tt_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	mgr->available += mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	mem->mm_node = NULL;
+}
+
+static void lima_ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+				  struct drm_printer *printer)
+{
+}
+
+static const struct ttm_mem_type_manager_func lima_bo_manager_func = {
+	.init = lima_ttm_bo_man_init,
+	.takedown = lima_ttm_bo_man_takedown,
+	.get_node = lima_ttm_bo_man_get_node,
+	.put_node = lima_ttm_bo_man_put_node,
+	.debug = lima_ttm_bo_man_debug
+};
+
+static int lima_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+			      struct ttm_mem_type_manager *man)
+{
+	struct lima_device *dev = ttm_to_lima_dev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_TT:
+		man->func = &lima_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	default:
+		dev_err(dev->dev, "Unsupported memory type %u\n",
+			(unsigned int)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int lima_ttm_backend_bind(struct ttm_tt *ttm,
+				 struct ttm_mem_reg *bo_mem)
+{
+	return 0;
+}
+
+static int lima_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	return 0;
+}
+
+static void lima_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct lima_ttm_tt *tt = (void *)ttm;
+
+	ttm_dma_tt_fini(&tt->ttm);
+	kfree(tt);
+}
+
+static struct ttm_backend_func lima_ttm_backend_func = {
+	.bind = &lima_ttm_backend_bind,
+	.unbind = &lima_ttm_backend_unbind,
+	.destroy = &lima_ttm_backend_destroy,
+};
+
+static struct ttm_tt *lima_ttm_tt_create(struct ttm_buffer_object *bo,
+					 uint32_t page_flags)
+{
+	struct lima_ttm_tt *tt;
+
+	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+	if (!tt)
+		return NULL;
+
+	tt->ttm.ttm.func = &lima_ttm_backend_func;
+
+	if (ttm_sg_tt_init(&tt->ttm, bo, page_flags)) {
+		kfree(tt);
+		return NULL;
+	}
+
+	return &tt->ttm.ttm;
+}
+
+static int lima_ttm_tt_populate(struct ttm_tt *ttm,
+				struct ttm_operation_ctx *ctx)
+{
+	struct lima_device *dev = ttm_to_lima_dev(ttm->bdev);
+	struct lima_ttm_tt *tt = (void *)ttm;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave) {
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 tt->ttm.dma_address,
+						 ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
+	return ttm_populate_and_map_pages(dev->dev, &tt->ttm, ctx);
+}
+
+static void lima_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct lima_device *dev = ttm_to_lima_dev(ttm->bdev);
+	struct lima_ttm_tt *tt = (void *)ttm;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;
+
+	ttm_unmap_and_unpopulate_pages(dev->dev, &tt->ttm);
+}
+
+static int lima_invalidate_caches(struct ttm_bo_device *bdev,
+				  uint32_t flags)
+{
+	struct lima_device *dev = ttm_to_lima_dev(bdev);
+
+	dev_err(dev->dev, "%s not implemented\n", __FUNCTION__);
+	return 0;
+}
+
+static void lima_evict_flags(struct ttm_buffer_object *tbo,
+			     struct ttm_placement *placement)
+{
+	struct lima_bo *bo = ttm_to_lima_bo(tbo);
+	struct lima_device *dev = to_lima_dev(bo->gem.dev);
+
+	dev_err(dev->dev, "%s not implemented\n", __FUNCTION__);
+}
+
+static int lima_verify_access(struct ttm_buffer_object *tbo,
+			      struct file *filp)
+{
+	struct lima_bo *bo = ttm_to_lima_bo(tbo);
+
+	return drm_vma_node_verify_access(&bo->gem.vma_node,
+					  filp->private_data);
+}
+
+static int lima_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				   struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+	case TTM_PL_TT:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void lima_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				 struct ttm_mem_reg *mem)
+{
+}
+
+static void lima_bo_move_notify(struct ttm_buffer_object *tbo, bool evict,
+				struct ttm_mem_reg *new_mem)
+{
+	struct lima_bo *bo = ttm_to_lima_bo(tbo);
+	struct lima_device *dev = to_lima_dev(bo->gem.dev);
+
+	if (evict)
+		dev_err(dev->dev, "%s not implemented\n", __FUNCTION__);
+}
+
+static void lima_bo_swap_notify(struct ttm_buffer_object *tbo)
+{
+	struct lima_bo *bo = ttm_to_lima_bo(tbo);
+	struct lima_device *dev = to_lima_dev(bo->gem.dev);
+
+	dev_err(dev->dev, "%s not implemented\n", __FUNCTION__);
+}
+
+static struct ttm_bo_driver lima_bo_driver = {
+	.ttm_tt_create = lima_ttm_tt_create,
+	.ttm_tt_populate = lima_ttm_tt_populate,
+	.ttm_tt_unpopulate = lima_ttm_tt_unpopulate,
+	.invalidate_caches = lima_invalidate_caches,
+	.init_mem_type = lima_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
+	.evict_flags = lima_evict_flags,
+	.verify_access = lima_verify_access,
+	.io_mem_reserve = lima_ttm_io_mem_reserve,
+	.io_mem_free = lima_ttm_io_mem_free,
+	.move_notify = lima_bo_move_notify,
+	.swap_notify = lima_bo_swap_notify,
+};
+
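+/*
+ * The TT budget defaults to min(3/4 of system RAM, 4GB); a positive
+ * lima_max_mem (a driver parameter, in MB) overrides it.
+ */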
+int lima_ttm_init(struct lima_device *dev)
+{
+	int err;
+	u64 gtt_size;
+
+	err = ttm_bo_device_init(&dev->mman.bdev,
+				 &lima_bo_driver,
+				 dev->ddev->anon_inode->i_mapping,
+				 DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (err) {
+		dev_err(dev->dev, "failed initializing buffer object "
+			"driver(%d).\n", err);
+		return err;
+	}
+
+	if (lima_max_mem < 0) {
+		struct sysinfo si;
+
+		si_meminfo(&si);
+		/* TODO: better to cap this to memory below the 32-bit boundary */
+		gtt_size = min(((u64)si.totalram * si.mem_unit * 3/4),
+			       0x100000000ULL);
+	} else {
+		gtt_size = (u64)lima_max_mem << 20;
+	}
+
+	err = ttm_bo_init_mm(&dev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
+	if (err) {
+		dev_err(dev->dev, "Failed initializing GTT heap.\n");
+		goto err_out0;
+	}
+	return 0;
+
+err_out0:
+	ttm_bo_device_release(&dev->mman.bdev);
+	return err;
+}
+
+void lima_ttm_fini(struct lima_device *dev)
+{
+	ttm_bo_device_release(&dev->mman.bdev);
+	dev_info(dev->dev, "ttm finalized\n");
+}
diff --git a/drivers/gpu/drm/lima/lima_ttm.h b/drivers/gpu/drm/lima/lima_ttm.h
new file mode 100644
index 000000000000..c2759a43f06b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ttm.h
@@ -0,0 +1,24 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_TTM_H__
+#define __LIMA_TTM_H__
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+struct lima_mman {
+	struct ttm_bo_device bdev;
+	bool mem_global_referenced;
+};
+
+struct lima_ttm_tt {
+	struct ttm_dma_tt ttm;
+};
+
+struct lima_device;
+struct lima_bo;
+
+int lima_ttm_init(struct lima_device *dev);
+void lima_ttm_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
new file mode 100644
index 000000000000..a264f3ae83fe
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -0,0 +1,354 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/interval_tree_generic.h>
+
+#include "lima_device.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+struct lima_bo_va_mapping {
+	struct list_head list;
+	struct rb_node rb;
+	uint32_t start;
+	uint32_t last;
+	uint32_t __subtree_last;
+};
+
+struct lima_bo_va {
+	struct list_head list;
+	unsigned int ref_count;
+
+	struct list_head mapping;
+
+	struct lima_vm *vm;
+};
+
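+/*
+ * Two level page table: a 4K page directory (PD) of 1024 entries, each
+ * pointing to a 4K page table (PT) that maps 4MB of the 32-bit GPU VA
+ * space.  PTs are allocated in blocks (BT) of 8, so one BO backs 8
+ * consecutive PD entries.
+ */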
+#define LIMA_VM_PD_SHIFT 22
+#define LIMA_VM_PT_SHIFT 12
+#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
+
+#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
+#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
+
+#define LIMA_PDE(va) ((va) >> LIMA_VM_PD_SHIFT)
+#define LIMA_PTE(va) (((va) & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
+#define LIMA_PBE(va) ((va) >> LIMA_VM_PB_SHIFT)
+#define LIMA_BTE(va) (((va) & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
+
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct lima_bo_va_mapping, rb, uint32_t, __subtree_last,
+		     START, LAST, static, lima_vm_it)
+
+#undef START
+#undef LAST
+
+static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+{
+	u32 addr;
+
+	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+		u32 pbe = LIMA_PBE(addr);
+		u32 bte = LIMA_BTE(addr);
+		u32 *bt;
+
+		bt = lima_bo_kmap(vm->bts[pbe]);
+		bt[bte] = 0;
+	}
+}
+
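+/*
+ * Write PTEs for [start, end], allocating the covering page table block
+ * on first use and hooking its page tables into the page directory.  On
+ * failure, all PTEs written so far are cleared again.
+ */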
+static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
+				  u32 start, u32 end)
+{
+	u64 addr;
+	int err, i = 0;
+
+	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+		u32 pbe = LIMA_PBE(addr);
+		u32 bte = LIMA_BTE(addr);
+		u32 *bt;
+
+		if (vm->bts[pbe]) {
+			bt = lima_bo_kmap(vm->bts[pbe]);
+		} else {
+			struct lima_bo *bt_bo;
+			dma_addr_t *pts;
+			u32 *pd;
+			int j;
+
+			bt_bo = lima_bo_create(
+				vm->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+				0, ttm_bo_type_kernel,
+				NULL, vm->pd->tbo.resv);
+			if (IS_ERR(bt_bo)) {
+				err = PTR_ERR(bt_bo);
+				goto err_out;
+			}
+
+			bt = lima_bo_kmap(bt_bo);
+			if (IS_ERR(bt)) {
+				lima_bo_unref(bt_bo);
+				err = PTR_ERR(bt);
+				goto err_out;
+			}
+			memset(bt, 0, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+
+			vm->bts[pbe] = bt_bo;
+			pd = lima_bo_kmap(vm->pd);
+			pd += pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT;
+			pts = lima_bo_get_pages(bt_bo);
+			for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++)
+				*pd++ = *pts++ | LIMA_VM_FLAG_PRESENT;
+		}
+
+		bt[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
+	}
+
+	return 0;
+
+err_out:
+	if (addr != start)
+		lima_vm_unmap_page_table(vm, start, addr - 1);
+	return err;
+}
+
+static struct lima_bo_va *
+lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
+{
+	struct lima_bo_va *bo_va, *ret = NULL;
+
+	list_for_each_entry(bo_va, &bo->va, list) {
+		if (bo_va->vm == vm) {
+			ret = bo_va;
+			break;
+		}
+	}
+
+	return ret;
+}
+
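+/*
+ * Map the BO at the GPU VA requested by userspace.  The caller must have
+ * attached the BO to this VM with lima_vm_bo_add() first, and the range
+ * must not overlap an existing mapping.
+ */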
+int lima_vm_bo_map(struct lima_vm *vm, struct lima_bo *bo, u32 start)
+{
+	int err;
+	struct lima_bo_va_mapping *it, *mapping;
+	u32 end = start + bo->gem.size - 1;
+	dma_addr_t *pages_dma = lima_bo_get_pages(bo);
+	struct lima_bo_va *bo_va;
+
+	it = lima_vm_it_iter_first(&vm->va, start, end);
+	if (it) {
+		dev_dbg(bo->gem.dev->dev, "lima vm map va overlap %x-%x %x-%x\n",
+			start, end, it->start, it->last);
+		return -EINVAL;
+	}
+
+	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+	mapping->start = start;
+	mapping->last = end;
+
+	err = lima_vm_map_page_table(vm, pages_dma, start, end);
+	if (err) {
+		kfree(mapping);
+		return err;
+	}
+
+	lima_vm_it_insert(mapping, &vm->va);
+
+	bo_va = lima_vm_bo_find(vm, bo);
+	list_add_tail(&mapping->list, &bo_va->mapping);
+
+	return 0;
+}
+
+static void lima_vm_unmap(struct lima_vm *vm,
+			  struct lima_bo_va_mapping *mapping)
+{
+	lima_vm_it_remove(mapping, &vm->va);
+
+	lima_vm_unmap_page_table(vm, mapping->start, mapping->last);
+
+	list_del(&mapping->list);
+	kfree(mapping);
+}
+
+int lima_vm_bo_unmap(struct lima_vm *vm, struct lima_bo *bo, u32 start)
+{
+	struct lima_bo_va *bo_va;
+	struct lima_bo_va_mapping *mapping;
+
+	bo_va = lima_vm_bo_find(vm, bo);
+	list_for_each_entry(mapping, &bo_va->mapping, list) {
+		if (mapping->start == start) {
+			lima_vm_unmap(vm, mapping);
+			break;
+		}
+	}
+
+	return 0;
+}
+
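+/*
+ * bo_va counts how many times the BO has been attached to this VM; the
+ * mappings are only torn down when the last reference is dropped in
+ * lima_vm_bo_del().
+ */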
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo)
+{
+	struct lima_bo_va *bo_va;
+
+	bo_va = lima_vm_bo_find(vm, bo);
+	if (bo_va) {
+		bo_va->ref_count++;
+		return 0;
+	}
+
+	bo_va = kmalloc(sizeof(*bo_va), GFP_KERNEL);
+	if (!bo_va)
+		return -ENOMEM;
+
+	bo_va->vm = vm;
+	bo_va->ref_count = 1;
+	INIT_LIST_HEAD(&bo_va->mapping);
+	list_add_tail(&bo_va->list, &bo->va);
+	return 0;
+}
+
+/* wait only on fences in resv that were emitted by tasks using this vm */
+static int lima_vm_wait_resv(struct lima_vm *vm,
+			     struct reservation_object *resv)
+{
+	unsigned int nr_fences;
+	struct dma_fence **fences;
+	unsigned int i;
+	long err;
+
+	err = reservation_object_get_fences_rcu(resv, NULL, &nr_fences, &fences);
+	if (err || !nr_fences)
+		return err;
+
+	for (i = 0; i < nr_fences; i++) {
+		struct drm_sched_fence *sf = to_drm_sched_fence(fences[i]);
+
+		if (sf && sf->owner == vm)
+			err |= dma_fence_wait(fences[i], false);
+		dma_fence_put(fences[i]);
+	}
+
+	kfree(fences);
+	return err;
+}
+
+int lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
+{
+	struct lima_bo_va *bo_va;
+	struct lima_bo_va_mapping *mapping, *tmp;
+	int err;
+
+	bo_va = lima_vm_bo_find(vm, bo);
+	if (--bo_va->ref_count > 0)
+		return 0;
+
+	/* wait for the bo to be idle before unmapping it from the vm, in
+	 * case the user space application was terminated while the bo was
+	 * still busy.
+	 */
+	err = lima_vm_wait_resv(vm, bo->tbo.resv);
+	if (err)
+		dev_err(vm->dev->dev, "bo del fail to wait (%d)\n", err);
+
+	list_for_each_entry_safe(mapping, tmp, &bo_va->mapping, list)
+		lima_vm_unmap(vm, mapping);
+	list_del(&bo_va->list);
+	kfree(bo_va);
+	return 0;
+}
+
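+/*
+ * Each VM gets its own page directory.  On GPUs with a DLBU (Mali-450)
+ * the DLBU page is mapped into every VM at the fixed reserved address
+ * LIMA_VA_RESERVE_DLBU so all PP cores can reach it.
+ */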
+struct lima_vm *lima_vm_create(struct lima_device *dev)
+{
+	struct lima_vm *vm;
+	void *pd;
+
+	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	if (!vm)
+		return NULL;
+
+	vm->dev = dev;
+	vm->va = RB_ROOT_CACHED;
+	kref_init(&vm->refcount);
+
+	vm->pd = lima_bo_create(dev, LIMA_PAGE_SIZE, 0,
+				ttm_bo_type_kernel, NULL, NULL);
+	if (IS_ERR(vm->pd))
+		goto err_out0;
+
+	pd = lima_bo_kmap(vm->pd);
+	if (IS_ERR(pd))
+		goto err_out1;
+	memset(pd, 0, LIMA_PAGE_SIZE);
+
+	if (dev->dlbu_cpu) {
+		int err = lima_vm_map_page_table(
+			vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
+			LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+		if (err)
+			goto err_out1;
+	}
+
+	return vm;
+
+err_out1:
+	lima_bo_unref(vm->pd);
+err_out0:
+	kfree(vm);
+	return NULL;
+}
+
+void lima_vm_release(struct kref *kref)
+{
+	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
+	struct lima_device *dev = vm->dev;
+	int i;
+
+	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
+		dev_err(dev->dev, "still active bo inside vm\n");
+
+	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+		if (vm->bts[i])
+			lima_bo_unref(vm->bts[i]);
+	}
+
+	if (vm->pd)
+		lima_bo_unref(vm->pd);
+
+	kfree(vm);
+}
+
+void lima_vm_print(struct lima_vm *vm)
+{
+	int i, j, k;
+	u32 *pd, *pt;
+
+	/* reference it to avoid a "defined but not used" warning */
+	(void)&lima_vm_it_iter_next;
+
+	pd = lima_bo_kmap(vm->pd);
+	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+		if (!vm->bts[i])
+			continue;
+
+		pt = lima_bo_kmap(vm->bts[i]);
+		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;
+
+			pr_info("lima vm pd %03x:%08x\n", idx, pd[idx]);
+
+			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
+				u32 pte = *pt++;
+
+				if (pte)
+					pr_info("  pt %03x:%08x\n", k, pte);
+			}
+		}
+	}
+}
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
new file mode 100644
index 000000000000..f615f8dfe71d
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -0,0 +1,59 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_VM_H__
+#define __LIMA_VM_H__
+
+#include <linux/rbtree.h>
+#include <linux/kref.h>
+
+#define LIMA_PAGE_SIZE    4096
+#define LIMA_PAGE_MASK    (LIMA_PAGE_SIZE - 1)
+#define LIMA_PAGE_ENT_NUM (LIMA_PAGE_SIZE / sizeof(u32))
+
+#define LIMA_VM_NUM_PT_PER_BT_SHIFT 3
+#define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT)
+
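+/* the top 1MB of the 32-bit GPU VA space is reserved for kernel use (DLBU) */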
+#define LIMA_VA_RESERVE_START  0xFFF00000
+#define LIMA_VA_RESERVE_DLBU   LIMA_VA_RESERVE_START
+#define LIMA_VA_RESERVE_END    0x100000000
+
+struct lima_bo;
+struct lima_device;
+
+struct lima_vm {
+	struct kref refcount;
+
+	/* tree of virtual addresses mapped */
+	struct rb_root_cached va;
+
+	struct lima_device *dev;
+
+	struct lima_bo *pd;
+	struct lima_bo *bts[LIMA_VM_NUM_BT];
+};
+
+int lima_vm_bo_map(struct lima_vm *vm, struct lima_bo *bo, u32 start);
+int lima_vm_bo_unmap(struct lima_vm *vm, struct lima_bo *bo, u32 start);
+
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo);
+int lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo);
+
+struct lima_vm *lima_vm_create(struct lima_device *dev);
+void lima_vm_release(struct kref *kref);
+
+static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
+{
+	kref_get(&vm->refcount);
+	return vm;
+}
+
+static inline void lima_vm_put(struct lima_vm *vm)
+{
+	kref_put(&vm->refcount, lima_vm_release);
+}
+
+void lima_vm_print(struct lima_vm *vm);
+
+#endif
diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
new file mode 100644
index 000000000000..c44757b4be39
--- /dev/null
+++ b/include/uapi/drm/lima_drm.h
@@ -0,0 +1,193 @@ 
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DRM_H__
+#define __LIMA_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define LIMA_INFO_GPU_MALI400 0x00
+#define LIMA_INFO_GPU_MALI450 0x01
+
+struct drm_lima_info {
+	__u32 gpu_id;   /* out */
+	__u32 num_pp;   /* out */
+	__u64 va_start; /* out */
+	__u64 va_end;   /* out */
+};
+
+struct drm_lima_gem_create {
+	__u32 size;    /* in */
+	__u32 flags;   /* in */
+	__u32 handle;  /* out */
+	__u32 pad;
+};
+
+struct drm_lima_gem_info {
+	__u32 handle;  /* in */
+	__u32 pad;
+	__u64 offset;  /* out */
+};
+
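+/*
+ * Userspace manages the GPU virtual address space itself: it picks a
+ * 32-bit va and asks the kernel to (un)map the BO there.
+ */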
+#define LIMA_VA_OP_MAP    1
+#define LIMA_VA_OP_UNMAP  2
+
+struct drm_lima_gem_va {
+	__u32 handle;  /* in */
+	__u32 op;      /* in */
+	__u32 flags;   /* in */
+	__u32 va;      /* in */
+};
+
+#define LIMA_SUBMIT_BO_READ   0x01
+#define LIMA_SUBMIT_BO_WRITE  0x02
+
+struct drm_lima_gem_submit_bo {
+	__u32 handle;  /* in */
+	__u32 flags;   /* in */
+};
+
+#define LIMA_SUBMIT_DEP_FENCE   0x00
+#define LIMA_SUBMIT_DEP_SYNC_FD 0x01
+
+struct drm_lima_gem_submit_dep_fence {
+	__u32 type;
+	__u32 ctx;
+	__u32 pipe;
+	__u32 seq;
+};
+
+struct drm_lima_gem_submit_dep_sync_fd {
+	__u32 type;
+	__u32 fd;
+};
+
+union drm_lima_gem_submit_dep {
+	__u32 type;
+	struct drm_lima_gem_submit_dep_fence fence;
+	struct drm_lima_gem_submit_dep_sync_fd sync_fd;
+};
+
+#define LIMA_GP_FRAME_REG_NUM 6
+
+struct drm_lima_gp_frame {
+	__u32 frame[LIMA_GP_FRAME_REG_NUM];
+};
+
+#define LIMA_PP_FRAME_REG_NUM 23
+#define LIMA_PP_WB_REG_NUM 12
+
+struct drm_lima_m400_pp_frame {
+	__u32 frame[LIMA_PP_FRAME_REG_NUM];
+	__u32 num_pp;
+	__u32 wb[3 * LIMA_PP_WB_REG_NUM];
+	__u32 plbu_array_address[4];
+	__u32 fragment_stack_address[4];
+};
+
+struct drm_lima_m450_pp_frame {
+	__u32 frame[LIMA_PP_FRAME_REG_NUM];
+	__u32 num_pp;
+	__u32 wb[3 * LIMA_PP_WB_REG_NUM];
+	__u32 use_dlbu;
+	__u32 _pad;
+	union {
+		__u32 plbu_array_address[8];
+		__u32 dlbu_regs[4];
+	};
+	__u32 fragment_stack_address[8];
+};
+
+#define LIMA_PIPE_GP  0x00
+#define LIMA_PIPE_PP  0x01
+
+#define LIMA_SUBMIT_FLAG_EXPLICIT_FENCE (1 << 0)
+#define LIMA_SUBMIT_FLAG_SYNC_FD_OUT    (1 << 1)
+
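+/*
+ * A submit carries an array of nr_bos drm_lima_gem_submit_bo entries, a
+ * pipe specific frame descriptor (drm_lima_gp_frame or one of the
+ * *_pp_frame structs, matched against frame_size) and nr_deps optional
+ * dependencies to wait on before execution.
+ */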
+struct drm_lima_gem_submit_in {
+	__u32 ctx;
+	__u32 pipe;
+	__u32 nr_bos;
+	__u32 frame_size;
+	__u64 bos;
+	__u64 frame;
+	__u64 deps;
+	__u32 nr_deps;
+	__u32 flags;
+};
+
+struct drm_lima_gem_submit_out {
+	__u32 fence;
+	__u32 done;
+	__u32 sync_fd;
+	__u32 _pad;
+};
+
+union drm_lima_gem_submit {
+	struct drm_lima_gem_submit_in in;
+	struct drm_lima_gem_submit_out out;
+};
+
+struct drm_lima_wait_fence {
+	__u32 ctx;         /* in */
+	__u32 pipe;        /* in */
+	__u64 timeout_ns;  /* in */
+	__u32 seq;         /* in */
+	__u32 error;       /* out */
+};
+
+#define LIMA_GEM_WAIT_READ   0x01
+#define LIMA_GEM_WAIT_WRITE  0x02
+
+struct drm_lima_gem_wait {
+	__u32 handle;      /* in */
+	__u32 op;          /* in */
+	__u64 timeout_ns;  /* in */
+};
+
+#define LIMA_CTX_OP_CREATE 1
+#define LIMA_CTX_OP_FREE   2
+
+struct drm_lima_ctx {
+	__u32 op;          /* in */
+	__u32 id;          /* in/out */
+};
+
+#define LIMA_GEM_MOD_OP_GET 0
+#define LIMA_GEM_MOD_OP_SET 1
+
+struct drm_lima_gem_mod {
+	__u32 handle;      /* in */
+	__u32 op;          /* in */
+	__u64 modifier;    /* in/out */
+};
+
+#define DRM_LIMA_INFO        0x00
+#define DRM_LIMA_GEM_CREATE  0x01
+#define DRM_LIMA_GEM_INFO    0x02
+#define DRM_LIMA_GEM_VA      0x03
+#define DRM_LIMA_GEM_SUBMIT  0x04
+#define DRM_LIMA_WAIT_FENCE  0x05
+#define DRM_LIMA_GEM_WAIT    0x06
+#define DRM_LIMA_CTX         0x07
+#define DRM_LIMA_GEM_MOD     0x08
+
+#define DRM_IOCTL_LIMA_INFO DRM_IOR(DRM_COMMAND_BASE + DRM_LIMA_INFO, struct drm_lima_info)
+#define DRM_IOCTL_LIMA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_CREATE, struct drm_lima_gem_create)
+#define DRM_IOCTL_LIMA_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_INFO, struct drm_lima_gem_info)
+#define DRM_IOCTL_LIMA_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_VA, struct drm_lima_gem_va)
+#define DRM_IOCTL_LIMA_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_SUBMIT, union drm_lima_gem_submit)
+#define DRM_IOCTL_LIMA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_WAIT_FENCE, struct drm_lima_wait_fence)
+#define DRM_IOCTL_LIMA_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_WAIT, struct drm_lima_gem_wait)
+#define DRM_IOCTL_LIMA_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_CTX, struct drm_lima_ctx)
+#define DRM_IOCTL_LIMA_GEM_MOD DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_MOD, struct drm_lima_gem_mod)
+
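+/*
+ * Minimal, hypothetical usage sketch (error handling omitted), assuming
+ * fd is an open lima DRM node and the chosen va is any free GPU address:
+ *
+ *	struct drm_lima_gem_create create = { .size = 4096 };
+ *	ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &create);
+ *
+ *	struct drm_lima_gem_va va = {
+ *		.handle = create.handle,
+ *		.op = LIMA_VA_OP_MAP,
+ *		.va = 0x1000000,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_LIMA_GEM_VA, &va);
+ */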
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __LIMA_DRM_H__ */