diff mbox series

drm/panthor: Kill the faulty_slots variable in panthor_sched_suspend()

Message ID 20240425103920.826458-1-boris.brezillon@collabora.com (mailing list archive)
State New, archived
Headers show
Series drm/panthor: Kill the faulty_slots variable in panthor_sched_suspend() | expand

Commit Message

Boris Brezillon April 25, 2024, 10:39 a.m. UTC
We can use upd_ctx.timedout_mask directly, and the faulty_slots update
in the flush_caches_failed situation is never used.

Suggested-by: Steven Price <steven.price@arm.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
---
 drivers/gpu/drm/panthor/panthor_sched.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

Comments

Erik Faye-Lund April 25, 2024, 11:04 a.m. UTC | #1
On Thu, 2024-04-25 at 12:39 +0200, Boris Brezillon wrote:
> We can use upd_ctx.timedout_mask directly, and the faulty_slots
> update
> in the flush_caches_failed situation is never used.
> 
> Suggested-by: Suggested-by: Steven Price <steven.price@arm.com>

Whoops? :)

> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++-------
>  1 file changed, 3 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c
> b/drivers/gpu/drm/panthor/panthor_sched.c
> index fad4678ca4c8..fed28c16d5d1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct
> panthor_device *ptdev)
>  {
>  	struct panthor_scheduler *sched = ptdev->scheduler;
>  	struct panthor_csg_slots_upd_ctx upd_ctx;
> -	u32 suspended_slots, faulty_slots;
>  	struct panthor_group *group;
> +	u32 suspended_slots;
>  	u32 i;
>  
>  	mutex_lock(&sched->lock);
> @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct
> panthor_device *ptdev)
>  
>  	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
>  	suspended_slots &= ~upd_ctx.timedout_mask;
> -	faulty_slots = upd_ctx.timedout_mask;
>  
> -	if (faulty_slots) {
> -		u32 slot_mask = faulty_slots;
> +	if (upd_ctx.timedout_mask) {
> +		u32 slot_mask = upd_ctx.timedout_mask;
>  
>  		drm_err(&ptdev->base, "CSG suspend failed,
> escalating to termination");
>  		csgs_upd_ctx_init(&upd_ctx);
> @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct
> panthor_device *ptdev)
>  
>  			slot_mask &= ~BIT(csg_id);
>  		}
> -
> -		if (flush_caches_failed)
> -			faulty_slots |= suspended_slots;
>  	}
>  
>  	for (i = 0; i < sched->csg_slot_count; i++) {
Steven Price April 25, 2024, 11:18 a.m. UTC | #2
On 25/04/2024 11:39, Boris Brezillon wrote:
> We can use upd_ctx.timedout_mask directly, and the faulty_slots update
> in the flush_caches_failed situation is never used.
> 
> Suggested-by: Suggested-by: Steven Price <steven.price@arm.com>

I'm obviously too full of suggestions! ;)

And you're doing a much better job of my todo list than I am!

> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>

Reviewed-by: Steven Price <steven.price@arm.com>

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++-------
>  1 file changed, 3 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index fad4678ca4c8..fed28c16d5d1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  {
>  	struct panthor_scheduler *sched = ptdev->scheduler;
>  	struct panthor_csg_slots_upd_ctx upd_ctx;
> -	u32 suspended_slots, faulty_slots;
>  	struct panthor_group *group;
> +	u32 suspended_slots;
>  	u32 i;
>  
>  	mutex_lock(&sched->lock);
> @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>  	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
>  	suspended_slots &= ~upd_ctx.timedout_mask;
> -	faulty_slots = upd_ctx.timedout_mask;
>  
> -	if (faulty_slots) {
> -		u32 slot_mask = faulty_slots;
> +	if (upd_ctx.timedout_mask) {
> +		u32 slot_mask = upd_ctx.timedout_mask;
>  
>  		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
>  		csgs_upd_ctx_init(&upd_ctx);
> @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>  			slot_mask &= ~BIT(csg_id);
>  		}
> -
> -		if (flush_caches_failed)
> -			faulty_slots |= suspended_slots;
>  	}
>  
>  	for (i = 0; i < sched->csg_slot_count; i++) {
Liviu Dudau April 26, 2024, 11:56 a.m. UTC | #3
On Thu, Apr 25, 2024 at 12:39:20PM +0200, Boris Brezillon wrote:
> We can use upd_ctx.timedout_mask directly, and the faulty_slots update
> in the flush_caches_failed situation is never used.
> 
> Suggested-by: Suggested-by: Steven Price <steven.price@arm.com>
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>

Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>

Best regards,
Liviu

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++-------
>  1 file changed, 3 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index fad4678ca4c8..fed28c16d5d1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  {
>  	struct panthor_scheduler *sched = ptdev->scheduler;
>  	struct panthor_csg_slots_upd_ctx upd_ctx;
> -	u32 suspended_slots, faulty_slots;
>  	struct panthor_group *group;
> +	u32 suspended_slots;
>  	u32 i;
>  
>  	mutex_lock(&sched->lock);
> @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>  	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
>  	suspended_slots &= ~upd_ctx.timedout_mask;
> -	faulty_slots = upd_ctx.timedout_mask;
>  
> -	if (faulty_slots) {
> -		u32 slot_mask = faulty_slots;
> +	if (upd_ctx.timedout_mask) {
> +		u32 slot_mask = upd_ctx.timedout_mask;
>  
>  		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
>  		csgs_upd_ctx_init(&upd_ctx);
> @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
>  
>  			slot_mask &= ~BIT(csg_id);
>  		}
> -
> -		if (flush_caches_failed)
> -			faulty_slots |= suspended_slots;
>  	}
>  
>  	for (i = 0; i < sched->csg_slot_count; i++) {
> -- 
> 2.44.0
>
Boris Brezillon May 2, 2024, 3:59 p.m. UTC | #4
On Thu, 25 Apr 2024 12:18:29 +0100
Steven Price <steven.price@arm.com> wrote:

> On 25/04/2024 11:39, Boris Brezillon wrote:
> > We can use upd_ctx.timedout_mask directly, and the faulty_slots update
> > in the flush_caches_failed situation is never used.
> > 
> > Suggested-by: Suggested-by: Steven Price <steven.price@arm.com>  
> 
> I'm obviously too full of suggestions! ;)

Pushed to drm-misc-next-fixes, but I realize I forgot to drop the extra
Suggested-by. Oh well.

> 
> And you're doing a much better job of my todo list than I am!
> 
> > Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>  
> 
> Reviewed-by: Steven Price <steven.price@arm.com>
> 
> > ---
> >  drivers/gpu/drm/panthor/panthor_sched.c | 10 +++-------
> >  1 file changed, 3 insertions(+), 7 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> > index fad4678ca4c8..fed28c16d5d1 100644
> > --- a/drivers/gpu/drm/panthor/panthor_sched.c
> > +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> > @@ -2584,8 +2584,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
> >  {
> >  	struct panthor_scheduler *sched = ptdev->scheduler;
> >  	struct panthor_csg_slots_upd_ctx upd_ctx;
> > -	u32 suspended_slots, faulty_slots;
> >  	struct panthor_group *group;
> > +	u32 suspended_slots;
> >  	u32 i;
> >  
> >  	mutex_lock(&sched->lock);
> > @@ -2605,10 +2605,9 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
> >  
> >  	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
> >  	suspended_slots &= ~upd_ctx.timedout_mask;
> > -	faulty_slots = upd_ctx.timedout_mask;
> >  
> > -	if (faulty_slots) {
> > -		u32 slot_mask = faulty_slots;
> > +	if (upd_ctx.timedout_mask) {
> > +		u32 slot_mask = upd_ctx.timedout_mask;
> >  
> >  		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
> >  		csgs_upd_ctx_init(&upd_ctx);
> > @@ -2659,9 +2658,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
> >  
> >  			slot_mask &= ~BIT(csg_id);
> >  		}
> > -
> > -		if (flush_caches_failed)
> > -			faulty_slots |= suspended_slots;
> >  	}
> >  
> >  	for (i = 0; i < sched->csg_slot_count; i++) {  
>
diff mbox series

Patch

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index fad4678ca4c8..fed28c16d5d1 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2584,8 +2584,8 @@  void panthor_sched_suspend(struct panthor_device *ptdev)
 {
 	struct panthor_scheduler *sched = ptdev->scheduler;
 	struct panthor_csg_slots_upd_ctx upd_ctx;
-	u32 suspended_slots, faulty_slots;
 	struct panthor_group *group;
+	u32 suspended_slots;
 	u32 i;
 
 	mutex_lock(&sched->lock);
@@ -2605,10 +2605,9 @@  void panthor_sched_suspend(struct panthor_device *ptdev)
 
 	csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
 	suspended_slots &= ~upd_ctx.timedout_mask;
-	faulty_slots = upd_ctx.timedout_mask;
 
-	if (faulty_slots) {
-		u32 slot_mask = faulty_slots;
+	if (upd_ctx.timedout_mask) {
+		u32 slot_mask = upd_ctx.timedout_mask;
 
 		drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
 		csgs_upd_ctx_init(&upd_ctx);
@@ -2659,9 +2658,6 @@  void panthor_sched_suspend(struct panthor_device *ptdev)
 
 			slot_mask &= ~BIT(csg_id);
 		}
-
-		if (flush_caches_failed)
-			faulty_slots |= suspended_slots;
 	}
 
 	for (i = 0; i < sched->csg_slot_count; i++) {