[v2,09/10] drm/imx: atomic phase 3 step 4: Use generic atomic page flip

Message ID: 1464686671-20299-10-git-send-email-gnuiyl@gmail.com (mailing list archive)
State: New, archived

Commit Message

Ying Liu May 31, 2016, 9:24 a.m. UTC
To support generic atomic page flip, this patch customizes ->atomic_commit
for nonblock commits.

Signed-off-by: Liu Ying <gnuiyl@gmail.com>
---
v1->v2:
* s/async/nonblock/ on this patch to address Daniel Vetter's comment.
* Wait for pending commit on each CRTC for both block and nonblock
  atomic mode settings.  This way, a block commit will not overwrite
  the hardware setting when a nonblock page flip is about to finish,
  so that the page flip may wait for vblank successfully.

 drivers/gpu/drm/imx/imx-drm-core.c | 135 +++++++++++++++++++++++++++++++-
 drivers/gpu/drm/imx/ipuv3-crtc.c   | 156 ++-----------------------------------
 2 files changed, 142 insertions(+), 149 deletions(-)

Comments

Daniel Vetter May 31, 2016, 11:03 a.m. UTC | #1
On Tue, May 31, 2016 at 05:24:30PM +0800, Liu Ying wrote:
> To support generic atomic page flip, this patch customizes ->atomic_commit
> for nonblock commits.
> 
> Signed-off-by: Liu Ying <gnuiyl@gmail.com>
> ---
> v1->v2:
> * s/async/nonblock/ on this patch to address Daniel Vetter's comment.
> * Wait for pending commit on each CRTC for both block and nonblock
>   atomic mode settings.  This way, a block commit will not overwrite
>   the hardware setting when a nonblock page flip is about to finish,
>   so that the page flip may wait for vblank successfully.

Ok, my generic nonblocking code seems pretty stable nowadays; please
rebase on top of that. Mostly you just have to replace your
commit_complete with an atomic_commit_tail that you plug into
dev->mode_config.helper_private->atomic_commit_tail.
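
For reference, a rough sketch of that wiring (purely illustrative, assuming
the drm_mode_config_helper_funcs interface from the drm-misc nonblocking
series; the imx_* names below are placeholders, not final driver code):

static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* Same ordering as imx_drm_atomic_complete() in this patch, minus
	 * the hand-rolled fence waiting and per-CRTC commit tracking, which
	 * the generic nonblocking machinery now takes care of. */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, false);

	/* Tell the helpers the hardware update is committed. */
	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}

static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
	.atomic_commit_tail = imx_drm_atomic_commit_tail,
};

/* during driver init: */
drm->mode_config.helper_private = &imx_drm_mode_config_helpers;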

Note that the nonblocking helpers are _very_ picky about your handling of
crtc_state->event. If anything times out, it's probably because your driver
didn't send out that event correctly. But that's just atomic event
handling, so really just a driver bug if you hit it.

For an example that closely resembles your driver, look at rockchip. We're
still hunting down some of the event handling issues.
-Daniel

Daniel Vetter June 11, 2016, 8:29 p.m. UTC | #2
On Tue, May 31, 2016 at 11:24 AM, Liu Ying <gnuiyl@gmail.com> wrote:
> To support generic atomic page flip, this patch customizes ->atomic_commit
> for nonblock commits.
>
> Signed-off-by: Liu Ying <gnuiyl@gmail.com>
> ---
> v1->v2:
> * s/async/nonblock/ on this patch to address Daniel Vetter's comment.
> * Wait for pending commit on each CRTC for both block and nonblock
>   atomic mode settings.  This way, a block commit will not overwrite
>   the hardware setting when a nonblock page flip is about to finish,
>   so that the page flip may wait for vblank successfully.

Generic nonblocking commit just landed in drm-misc, which means you
can remove all your special imx commit functions and work items here.
Please do; less boilerplate in driver code is good ;-) Note that if
you don't handle crtc_state->event correctly and in all cases, this
will result in some issues and errors in dmesg. But it should be easy to
fix up your driver to be compliant.
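
A minimal sketch of what handling the event "in all cases" can look like
(illustrative only; the hook placement and the ipu_* name are assumptions,
the helpers drm_crtc_arm_vblank_event()/drm_crtc_send_vblank_event() are the
ones available at the time):

static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		/* Consume the event in every commit that carries one, even
		 * if nothing else changed, so nobody waits on it forever. */
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}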

Also, another thing to double-check after the conversion is done is
that you don't have any unnecessary boilerplate code left (a rough
sketch of the trimmed-down hooks follows this list):
- ipu_crtc->enabled checks in enable/disable can be removed, the atomic
helpers already ensure that you don't get enabled/disabled twice
- all the legacy hooks like dpms, off, prepare and commit should be
removed; use enable/disable instead (both crtc and encoder)
- best_encoder can be removed (except when you dynamically select
encoders, but then you need to switch to atomic_best_encoder and can
still remove best_encoder)
- mode_fixup can be removed if empty
- all other hooks which are empty can be removed too (but I didn't spot any)
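
To illustrate the end state (all names here are placeholders, not the actual
i.MX code), the helper vtables would shrink to roughly:

static const struct drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
	/* no ->dpms, ->prepare, ->commit, ->mode_fixup */
	.mode_set_nofb	= ipu_crtc_mode_set_nofb,
	.atomic_check	= ipu_crtc_atomic_check,
	.atomic_begin	= ipu_crtc_atomic_begin,
	.disable	= ipu_crtc_disable,
	.enable		= ipu_crtc_enable,
};

static const struct drm_encoder_helper_funcs imx_encoder_helper_funcs = {
	/* no ->dpms, ->prepare, ->commit, ->best_encoder */
	.mode_set	= imx_encoder_mode_set,
	.enable		= imx_encoder_enable,
	.disable	= imx_encoder_disable,
};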

Cheers, Daniel

Patch

diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 2a2ab8c..d2bb743 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -15,10 +15,14 @@ 
  */
 #include <linux/component.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
 #include <linux/fb.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/reservation.h>
+#include <linux/wait.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
@@ -48,6 +52,14 @@  struct imx_drm_device {
 struct imx_drm_crtc {
 	struct drm_crtc				*crtc;
 	struct imx_drm_crtc_helper_funcs	imx_drm_helper_funcs;
+	wait_queue_head_t			commit_wait;
+	bool					commit_pending;
+};
+
+struct imx_drm_commit {
+	struct work_struct work;
+	struct drm_device *dev;
+	struct drm_atomic_state *state;
 };
 
 #if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
@@ -168,11 +180,130 @@  static void imx_drm_output_poll_changed(struct drm_device *drm)
 	drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
+static void imx_drm_atomic_complete(struct imx_drm_commit *commit)
+{
+	struct drm_device *dev = commit->dev;
+	struct imx_drm_device *imxdrm = dev->dev_private;
+	struct imx_drm_crtc *imx_crtc;
+	struct drm_atomic_state *old_state = commit->state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_plane_state *plane_state;
+	struct drm_gem_cma_object *cma_obj;
+	struct fence *excl;
+	unsigned shared_count;
+	struct fence **shared;
+	unsigned int i, j;
+	int ret;
+
+	/* Wait for fences. */
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		if (crtc->state->event) {
+			plane_state = crtc->primary->state;
+			cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
+			if (cma_obj->base.dma_buf) {
+				ret = reservation_object_get_fences_rcu(
+					cma_obj->base.dma_buf->resv, &excl,
+					&shared_count, &shared);
+				if (unlikely(ret))
+					DRM_ERROR("failed to get fences "
+						  "for buffer\n");
+
+				if (excl) {
+					fence_wait(excl, false);
+					fence_put(excl);
+				}
+				for (j = 0; j < shared_count; j++) {
+					fence_wait(shared[j], false);
+					fence_put(shared[j]);
+				}
+			}
+		}
+	}
+
+	/* Apply the atomic update. */
+	drm_atomic_helper_commit_modeset_disables(dev, old_state);
+	drm_atomic_helper_commit_modeset_enables(dev, old_state);
+	drm_atomic_helper_commit_planes(dev, old_state, false);
+	drm_atomic_helper_wait_for_vblanks(dev, old_state);
+	drm_atomic_helper_cleanup_planes(dev, old_state);
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		imx_crtc = imxdrm->crtc[i];
+
+		/* Complete the commit, wake up any waiter. */
+		spin_lock(&imx_crtc->commit_wait.lock);
+		imx_crtc->commit_pending = false;
+		wake_up_all_locked(&imx_crtc->commit_wait);
+		spin_unlock(&imx_crtc->commit_wait.lock);
+	}
+
+	drm_atomic_state_free(old_state);
+
+	kfree(commit);
+}
+
+static void imx_drm_atomic_work(struct work_struct *work)
+{
+	struct imx_drm_commit *commit =
+		container_of(work, struct imx_drm_commit, work);
+
+	imx_drm_atomic_complete(commit);
+}
+
+static int imx_drm_atomic_commit(struct drm_device *dev,
+				 struct drm_atomic_state *state, bool nonblock)
+{
+	struct imx_drm_device *imxdrm = dev->dev_private;
+	struct imx_drm_crtc *imx_crtc;
+	struct imx_drm_commit *commit;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	unsigned int i;
+	int ret;
+
+	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+	if (commit == NULL)
+		return -ENOMEM;
+
+	commit->dev = dev;
+	commit->state = state;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		imx_crtc = imxdrm->crtc[i];
+
+		spin_lock(&imx_crtc->commit_wait.lock);
+		ret = wait_event_interruptible_locked(
+				imx_crtc->commit_wait,
+				!imx_crtc->commit_pending);
+		if (ret == 0)
+			imx_crtc->commit_pending = true;
+		spin_unlock(&imx_crtc->commit_wait.lock);
+
+		if (ret) {
+			kfree(commit);
+			return ret;
+		}
+	}
+
+	if (nonblock)
+		INIT_WORK(&commit->work, imx_drm_atomic_work);
+
+	drm_atomic_helper_swap_state(dev, state);
+
+	if (nonblock)
+		schedule_work(&commit->work);
+	else
+		imx_drm_atomic_complete(commit);
+
+	return 0;
+}
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = imx_drm_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_commit = imx_drm_atomic_commit,
 };
 
 /*
@@ -307,6 +438,8 @@  int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 	imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs;
 	imx_drm_crtc->crtc = crtc;
 
+	init_waitqueue_head(&imx_drm_crtc->commit_wait);
+
 	crtc->port = port;
 
 	imxdrm->crtc[imxdrm->pipes++] = imx_drm_crtc;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index f20340f..7ee51db 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -24,8 +24,6 @@ 
 #include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 
@@ -35,23 +33,6 @@ 
 
 #define DRIVER_DESC		"i.MX IPUv3 Graphics"
 
-enum ipu_flip_status {
-	IPU_FLIP_NONE,
-	IPU_FLIP_PENDING,
-	IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
-	struct work_struct		unref_work;
-	struct drm_gem_object		*bo;
-	struct drm_pending_vblank_event *page_flip_event;
-	struct work_struct		fence_work;
-	struct ipu_crtc			*crtc;
-	struct fence			*excl;
-	unsigned			shared_count;
-	struct fence			**shared;
-};
-
 struct ipu_crtc {
 	struct device		*dev;
 	struct drm_crtc		base;
@@ -62,9 +43,6 @@  struct ipu_crtc {
 
 	struct ipu_dc		*dc;
 	struct ipu_di		*di;
-	enum ipu_flip_status	flip_state;
-	struct workqueue_struct *flip_queue;
-	struct ipu_flip_work	*flip_work;
 	int			irq;
 };
 
@@ -92,150 +70,35 @@  static void ipu_crtc_disable(struct drm_crtc *crtc)
 	drm_crtc_vblank_off(&ipu_crtc->base);
 }
 
-static void ipu_flip_unref_work_func(struct work_struct *__work)
-{
-	struct ipu_flip_work *work =
-			container_of(__work, struct ipu_flip_work, unref_work);
-
-	drm_gem_object_unreference_unlocked(work->bo);
-	kfree(work);
-}
-
-static void ipu_flip_fence_work_func(struct work_struct *__work)
-{
-	struct ipu_flip_work *work =
-			container_of(__work, struct ipu_flip_work, fence_work);
-	int i;
-
-	/* wait for all fences attached to the FB obj to signal */
-	if (work->excl) {
-		fence_wait(work->excl, false);
-		fence_put(work->excl);
-	}
-	for (i = 0; i < work->shared_count; i++) {
-		fence_wait(work->shared[i], false);
-		fence_put(work->shared[i]);
-	}
-
-	work->crtc->flip_state = IPU_FLIP_SUBMITTED;
-}
-
-static int ipu_page_flip(struct drm_crtc *crtc,
-		struct drm_framebuffer *fb,
-		struct drm_pending_vblank_event *event,
-		uint32_t page_flip_flags)
-{
-	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-	struct ipu_flip_work *flip_work;
-	int ret;
-
-	if (ipu_crtc->flip_state != IPU_FLIP_NONE)
-		return -EBUSY;
-
-	ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
-	if (ret) {
-		dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
-		list_del(&event->base.link);
-
-		return ret;
-	}
-
-	flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
-	if (!flip_work) {
-		ret = -ENOMEM;
-		goto put_vblank;
-	}
-	INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
-	flip_work->page_flip_event = event;
-
-	/* get BO backing the old framebuffer and take a reference */
-	flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
-	drm_gem_object_reference(flip_work->bo);
-
-	ipu_crtc->flip_work = flip_work;
-	/*
-	 * If the object has a DMABUF attached, we need to wait on its fences
-	 * if there are any.
-	 */
-	if (cma_obj->base.dma_buf) {
-		INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
-		flip_work->crtc = ipu_crtc;
-
-		ret = reservation_object_get_fences_rcu(
-				cma_obj->base.dma_buf->resv, &flip_work->excl,
-				&flip_work->shared_count, &flip_work->shared);
-
-		if (unlikely(ret)) {
-			DRM_ERROR("failed to get fences for buffer\n");
-			goto free_flip_work;
-		}
-
-		/* No need to queue the worker if the are no fences */
-		if (!flip_work->excl && !flip_work->shared_count) {
-			ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-		} else {
-			ipu_crtc->flip_state = IPU_FLIP_PENDING;
-			queue_work(ipu_crtc->flip_queue,
-				   &flip_work->fence_work);
-		}
-	} else {
-		ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-	}
-
-	if (crtc->primary->state)
-		drm_atomic_set_fb_for_plane(crtc->primary->state, fb);
-
-	return 0;
-
-free_flip_work:
-	drm_gem_object_unreference_unlocked(flip_work->bo);
-	kfree(flip_work);
-	ipu_crtc->flip_work = NULL;
-put_vblank:
-	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
-	return ret;
-}
-
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = drm_crtc_cleanup,
-	.page_flip = ipu_page_flip,
+	.page_flip = drm_atomic_helper_page_flip,
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_handle_pageflip(struct drm_crtc *crtc)
 {
+	struct drm_device *drm = crtc->dev;
 	unsigned long flags;
-	struct drm_device *drm = ipu_crtc->base.dev;
-	struct ipu_flip_work *work = ipu_crtc->flip_work;
 
 	spin_lock_irqsave(&drm->event_lock, flags);
-	if (work->page_flip_event)
-		drm_crtc_send_vblank_event(&ipu_crtc->base,
-					   work->page_flip_event);
-	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
+	drm_crtc_send_vblank_event(crtc, crtc->state->event);
+	crtc->state->event = NULL;
 	spin_unlock_irqrestore(&drm->event_lock, flags);
 }
 
 static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
 {
 	struct ipu_crtc *ipu_crtc = dev_id;
+	struct drm_crtc *crtc = &ipu_crtc->base;
 
 	imx_drm_handle_vblank(ipu_crtc->imx_crtc);
 
-	if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
-		struct ipu_plane *plane = ipu_crtc->plane[0];
-
-		ipu_plane_set_base(plane, ipu_crtc->base.primary->fb);
-		ipu_crtc_handle_pageflip(ipu_crtc);
-		queue_work(ipu_crtc->flip_queue,
-			   &ipu_crtc->flip_work->unref_work);
-		ipu_crtc->flip_state = IPU_FLIP_NONE;
-	}
+	if (crtc->state->event)
+		ipu_crtc_handle_pageflip(crtc);
 
 	return IRQ_HANDLED;
 }
@@ -458,8 +321,6 @@  static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 	/* Only enable IRQ when we actually need it to trigger work. */
 	disable_irq(ipu_crtc->irq);
 
-	ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
 	return 0;
 
 err_put_plane1_res:
@@ -504,7 +365,6 @@  static void ipu_drm_unbind(struct device *dev, struct device *master,
 
 	imx_drm_remove_crtc(ipu_crtc->imx_crtc);
 
-	destroy_workqueue(ipu_crtc->flip_queue);
 	ipu_put_resources(ipu_crtc);
 	if (ipu_crtc->plane[1])
 		ipu_plane_put_resources(ipu_crtc->plane[1]);