[v2] drm/msm: Fix race condition in msm driver with async layer updates

Message ID 1602753310-22105-1-git-send-email-mkrishn@codeaurora.org (mailing list archive)
State Superseded
Series [v2] drm/msm: Fix race condition in msm driver with async layer updates

Commit Message

Krishna Manikandan Oct. 15, 2020, 9:15 a.m. UTC
When there are back-to-back commits with async cursor updates,
there is a case where the second commit can program the DPU HW
blocks while the first one has not yet finished flushing its
configuration to the hardware.

Synchronize the compositions such that the second commit waits
until the first commit has flushed its composition.

This change also introduces a per-CRTC commit lock, so that
commits on different CRTCs do not block each other.

Changes in v2:
	- Use an array of mutexes in kms to handle commit
	  lock per crtc. (Rob Clark)

Signed-off-by: Krishna Manikandan <mkrishn@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_atomic.c | 32 +++++++++++++++++++-------------
 drivers/gpu/drm/msm/msm_kms.h    |  6 ++++--
 2 files changed, 23 insertions(+), 15 deletions(-)

Comments

Rob Clark Oct. 15, 2020, 3:15 p.m. UTC | #1
On Thu, Oct 15, 2020 at 2:15 AM Krishna Manikandan
<mkrishn@codeaurora.org> wrote:
>
> When there are back-to-back commits with async cursor updates,
> there is a case where the second commit can program the DPU HW
> blocks while the first one has not yet finished flushing its
> configuration to the hardware.
>
> Synchronize the compositions such that the second commit waits
> until the first commit has flushed its composition.
>
> This change also introduces a per-CRTC commit lock, so that
> commits on different CRTCs do not block each other.
>
> Changes in v2:
>         - Use an array of mutexes in kms to handle commit
>           lock per crtc. (Rob Clark)
>
> Signed-off-by: Krishna Manikandan <mkrishn@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/msm_atomic.c | 32 +++++++++++++++++++-------------
>  drivers/gpu/drm/msm/msm_kms.h    |  6 ++++--
>  2 files changed, 23 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
> index 561bfa4..f9bd472 100644
> --- a/drivers/gpu/drm/msm/msm_atomic.c
> +++ b/drivers/gpu/drm/msm/msm_atomic.c
> @@ -61,10 +61,10 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
>
>         trace_msm_atomic_async_commit_start(crtc_mask);
>
> -       mutex_lock(&kms->commit_lock);
> +       mutex_lock(&kms->commit_lock[crtc_idx]);
>
>         if (!(kms->pending_crtc_mask & crtc_mask)) {
> -               mutex_unlock(&kms->commit_lock);
> +               mutex_unlock(&kms->commit_lock[crtc_idx]);
>                 goto out;
>         }
>
> @@ -79,7 +79,6 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
>          */
>         trace_msm_atomic_flush_commit(crtc_mask);
>         kms->funcs->flush_commit(kms, crtc_mask);
> -       mutex_unlock(&kms->commit_lock);
>
>         /*
>          * Wait for flush to complete:
> @@ -90,9 +89,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
>
>         vblank_put(kms, crtc_mask);
>
> -       mutex_lock(&kms->commit_lock);
>         kms->funcs->complete_commit(kms, crtc_mask);
> -       mutex_unlock(&kms->commit_lock);
> +       mutex_unlock(&kms->commit_lock[crtc_idx]);
>         kms->funcs->disable_commit(kms);
>
>  out:
> @@ -171,6 +169,16 @@ static unsigned get_crtc_mask(struct drm_atomic_state *state)
>         return mask;
>  }
>
> +static int get_crtc_id(struct msm_kms *kms, unsigned int crtc_mask)
> +{
> +       struct drm_crtc *crtc;
> +
> +       for_each_crtc_mask(kms->dev, crtc, crtc_mask)
> +               return drm_crtc_index(crtc);
> +
> +       return 0;
> +}

This is closer, but a commit could still touch multiple CRTCs. I
think what you should do is add lock/unlock helpers, similar to
vblank_get()/vblank_put(), i.e.:

static void lock_crtcs(struct msm_kms *kms, unsigned crtc_mask)
{
  struct drm_crtc *crtc;

  for_each_crtc_mask(kms->dev, crtc, crtc_mask)
    mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
}
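
For the unlock side, a matching helper could look something like this
(just a sketch; since lock_crtcs() always acquires the mutexes in the
same CRTC-index order, the unlock order does not matter for correctness):

static void unlock_crtcs(struct msm_kms *kms, unsigned crtc_mask)
{
  struct drm_crtc *crtc;

  /* sketch: mirror of lock_crtcs() above, not necessarily the final helper */
  for_each_crtc_mask(kms->dev, crtc, crtc_mask)
    mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}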

and use those helpers everywhere
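
Concretely, the mutex_lock(&kms->commit_lock[crtc_idx]) calls in
msm_atomic_commit_tail() would then turn into something along these
lines (abbreviated sketch, not the exact final code):

  lock_crtcs(kms, crtc_mask);

  trace_msm_atomic_wait_flush_start(crtc_mask);
  kms->funcs->wait_flush(kms, crtc_mask);
  trace_msm_atomic_wait_flush_finish(crtc_mask);

  /* ... prepare and flush the current update ... */
  kms->funcs->flush_commit(kms, crtc_mask);

  unlock_crtcs(kms, crtc_mask);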

(Technically we only go down the async path if there is only a single
crtc, but no reason not to use the lock/unlock helpers in both cases)

BR,
-R

> +
>  void msm_atomic_commit_tail(struct drm_atomic_state *state)
>  {
>         struct drm_device *dev = state->dev;
> @@ -180,6 +188,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
>         unsigned crtc_mask = get_crtc_mask(state);
>         bool async = kms->funcs->vsync_time &&
>                         can_do_async(state, &async_crtc);
> +       int crtc_idx = get_crtc_id(kms, crtc_mask);
>
>         trace_msm_atomic_commit_tail_start(async, crtc_mask);
>
> @@ -189,12 +198,11 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
>          * Ensure any previous (potentially async) commit has
>          * completed:
>          */
> +       mutex_lock(&kms->commit_lock[crtc_idx]);
>         trace_msm_atomic_wait_flush_start(crtc_mask);
>         kms->funcs->wait_flush(kms, crtc_mask);
>         trace_msm_atomic_wait_flush_finish(crtc_mask);
>
> -       mutex_lock(&kms->commit_lock);
> -
>         /*
>          * Now that there is no in-progress flush, prepare the
>          * current update:
> @@ -232,8 +240,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
>                 }
>
>                 kms->funcs->disable_commit(kms);
> -               mutex_unlock(&kms->commit_lock);
> -
> +               mutex_unlock(&kms->commit_lock[crtc_idx]);
>                 /*
>                  * At this point, from drm core's perspective, we
>                  * are done with the atomic update, so we can just
> @@ -260,8 +267,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
>          */
>         trace_msm_atomic_flush_commit(crtc_mask);
>         kms->funcs->flush_commit(kms, crtc_mask);
> -       mutex_unlock(&kms->commit_lock);
> -
> +       mutex_unlock(&kms->commit_lock[crtc_idx]);
>         /*
>          * Wait for flush to complete:
>          */
> @@ -271,9 +277,9 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
>
>         vblank_put(kms, crtc_mask);
>
> -       mutex_lock(&kms->commit_lock);
> +       mutex_lock(&kms->commit_lock[crtc_idx]);
>         kms->funcs->complete_commit(kms, crtc_mask);
> -       mutex_unlock(&kms->commit_lock);
> +       mutex_unlock(&kms->commit_lock[crtc_idx]);
>         kms->funcs->disable_commit(kms);
>
>         drm_atomic_helper_commit_hw_done(state);
> diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
> index 1cbef6b..2049847 100644
> --- a/drivers/gpu/drm/msm/msm_kms.h
> +++ b/drivers/gpu/drm/msm/msm_kms.h
> @@ -155,7 +155,7 @@ struct msm_kms {
>          * For async commit, where ->flush_commit() and later happens
>          * from the crtc's pending_timer close to end of the frame:
>          */
> -       struct mutex commit_lock;
> +       struct mutex commit_lock[MAX_CRTCS];
>         unsigned pending_crtc_mask;
>         struct msm_pending_timer pending_timers[MAX_CRTCS];
>  };
> @@ -165,7 +165,9 @@ static inline void msm_kms_init(struct msm_kms *kms,
>  {
>         unsigned i;
>
> -       mutex_init(&kms->commit_lock);
> +       for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
> +               mutex_init(&kms->commit_lock[i]);
> +
>         kms->funcs = funcs;
>
>         for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
> --
> 2.7.4
>

Patch

diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 561bfa4..f9bd472 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -61,10 +61,10 @@  static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
 
 	trace_msm_atomic_async_commit_start(crtc_mask);
 
-	mutex_lock(&kms->commit_lock);
+	mutex_lock(&kms->commit_lock[crtc_idx]);
 
 	if (!(kms->pending_crtc_mask & crtc_mask)) {
-		mutex_unlock(&kms->commit_lock);
+		mutex_unlock(&kms->commit_lock[crtc_idx]);
 		goto out;
 	}
 
@@ -79,7 +79,6 @@  static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
 	 */
 	trace_msm_atomic_flush_commit(crtc_mask);
 	kms->funcs->flush_commit(kms, crtc_mask);
-	mutex_unlock(&kms->commit_lock);
 
 	/*
 	 * Wait for flush to complete:
@@ -90,9 +89,8 @@  static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
 
 	vblank_put(kms, crtc_mask);
 
-	mutex_lock(&kms->commit_lock);
 	kms->funcs->complete_commit(kms, crtc_mask);
-	mutex_unlock(&kms->commit_lock);
+	mutex_unlock(&kms->commit_lock[crtc_idx]);
 	kms->funcs->disable_commit(kms);
 
 out:
@@ -171,6 +169,16 @@  static unsigned get_crtc_mask(struct drm_atomic_state *state)
 	return mask;
 }
 
+static int get_crtc_id(struct msm_kms *kms, unsigned int crtc_mask)
+{
+	struct drm_crtc *crtc;
+
+	for_each_crtc_mask(kms->dev, crtc, crtc_mask)
+		return drm_crtc_index(crtc);
+
+	return 0;
+}
+
 void msm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -180,6 +188,7 @@  void msm_atomic_commit_tail(struct drm_atomic_state *state)
 	unsigned crtc_mask = get_crtc_mask(state);
 	bool async = kms->funcs->vsync_time &&
 			can_do_async(state, &async_crtc);
+	int crtc_idx = get_crtc_id(kms, crtc_mask);
 
 	trace_msm_atomic_commit_tail_start(async, crtc_mask);
 
@@ -189,12 +198,11 @@  void msm_atomic_commit_tail(struct drm_atomic_state *state)
 	 * Ensure any previous (potentially async) commit has
 	 * completed:
 	 */
+	mutex_lock(&kms->commit_lock[crtc_idx]);
 	trace_msm_atomic_wait_flush_start(crtc_mask);
 	kms->funcs->wait_flush(kms, crtc_mask);
 	trace_msm_atomic_wait_flush_finish(crtc_mask);
 
-	mutex_lock(&kms->commit_lock);
-
 	/*
 	 * Now that there is no in-progress flush, prepare the
 	 * current update:
@@ -232,8 +240,7 @@  void msm_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 
 		kms->funcs->disable_commit(kms);
-		mutex_unlock(&kms->commit_lock);
-
+		mutex_unlock(&kms->commit_lock[crtc_idx]);
 		/*
 		 * At this point, from drm core's perspective, we
 		 * are done with the atomic update, so we can just
@@ -260,8 +267,7 @@  void msm_atomic_commit_tail(struct drm_atomic_state *state)
 	 */
 	trace_msm_atomic_flush_commit(crtc_mask);
 	kms->funcs->flush_commit(kms, crtc_mask);
-	mutex_unlock(&kms->commit_lock);
-
+	mutex_unlock(&kms->commit_lock[crtc_idx]);
 	/*
 	 * Wait for flush to complete:
 	 */
@@ -271,9 +277,9 @@  void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	vblank_put(kms, crtc_mask);
 
-	mutex_lock(&kms->commit_lock);
+	mutex_lock(&kms->commit_lock[crtc_idx]);
 	kms->funcs->complete_commit(kms, crtc_mask);
-	mutex_unlock(&kms->commit_lock);
+	mutex_unlock(&kms->commit_lock[crtc_idx]);
 	kms->funcs->disable_commit(kms);
 
 	drm_atomic_helper_commit_hw_done(state);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 1cbef6b..2049847 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -155,7 +155,7 @@  struct msm_kms {
 	 * For async commit, where ->flush_commit() and later happens
 	 * from the crtc's pending_timer close to end of the frame:
 	 */
-	struct mutex commit_lock;
+	struct mutex commit_lock[MAX_CRTCS];
 	unsigned pending_crtc_mask;
 	struct msm_pending_timer pending_timers[MAX_CRTCS];
 };
@@ -165,7 +165,9 @@  static inline void msm_kms_init(struct msm_kms *kms,
 {
 	unsigned i;
 
-	mutex_init(&kms->commit_lock);
+	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+		mutex_init(&kms->commit_lock[i]);
+
 	kms->funcs = funcs;
 
 	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)