| Message ID | 20230320144356.803762-24-robdclark@gmail.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | drm/msm+PM+icc: Make job_run() reclaim-safe |
This is good. Hopefully it helps us catch lockdep bugs.

Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>

Regards,
Luben

On 2023-03-20 10:43, Rob Clark wrote:
> From: Rob Clark <robdclark@chromium.org>
>
> Based on
> https://lore.kernel.org/dri-devel/20200604081224.863494-10-daniel.vetter@ffwll.ch/
> but made to be optional.
>
> Signed-off-by: Rob Clark <robdclark@chromium.org>
> ---
>  drivers/gpu/drm/msm/msm_ringbuffer.c   | 1 +
>  drivers/gpu/drm/scheduler/sched_main.c | 9 +++++++++
>  include/drm/gpu_scheduler.h            | 2 ++
>  3 files changed, 12 insertions(+)
>
> diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
> index b60199184409..7e42baf16cd0 100644
> --- a/drivers/gpu/drm/msm/msm_ringbuffer.c
> +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
> @@ -93,6 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
>  	/* currently managing hangcheck ourselves: */
>  	sched_timeout = MAX_SCHEDULE_TIMEOUT;
>
> +	ring->sched.fence_signaling = true;
>  	ret = drm_sched_init(&ring->sched, &msm_sched_ops,
>  			num_hw_submissions, 0, sched_timeout,
>  			NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 4e6ad6e122bc..c2ee44d6224b 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -978,10 +978,15 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
>  static int drm_sched_main(void *param)
>  {
>  	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
> +	const bool fence_signaling = sched->fence_signaling;
> +	bool fence_cookie;
>  	int r;
>
>  	sched_set_fifo_low(current);
>
> +	if (fence_signaling)
> +		fence_cookie = dma_fence_begin_signalling();
> +
>  	while (!kthread_should_stop()) {
>  		struct drm_sched_entity *entity = NULL;
>  		struct drm_sched_fence *s_fence;
> @@ -1039,6 +1044,10 @@ static int drm_sched_main(void *param)
>
>  		wake_up(&sched->job_scheduled);
>  	}
> +
> +	if (fence_signaling)
> +		dma_fence_end_signalling(fence_cookie);
> +
>  	return 0;
>  }
>
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 9db9e5e504ee..8f23ea522e22 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -483,6 +483,7 @@ struct drm_sched_backend_ops {
>  	 * @ready: marks if the underlying HW is ready to work
>  	 * @free_guilty: A hit to time out handler to free the guilty job.
>  	 * @dev: system &struct device
> +	 * @fence_signaling: Opt in to fence signaling annotations
>  	 *
>  	 * One scheduler is implemented for each hardware ring.
>  	 */
> @@ -507,6 +508,7 @@ struct drm_gpu_scheduler {
>  	bool ready;
>  	bool free_guilty;
>  	struct device *dev;
> +	bool fence_signaling;
>  };
>
>  int drm_sched_init(struct drm_gpu_scheduler *sched,
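For context on what the opt-in buys: `dma_fence_begin_signalling()` returns a lockdep cookie that marks everything up to the matching `dma_fence_end_signalling()` as a fence-signalling critical section, so blocking on memory reclaim (or on anything else that may itself wait on a fence) inside that section gets reported as a potential deadlock. Below is a minimal sketch of the same annotation pattern in a driver completion path; it is not part of this patch, and `my_device`, `my_device_job_done()`, and `done_fence` are hypothetical names used only for illustration.

```c
#include <linux/dma-fence.h>

/* Hypothetical per-device state, for the sketch only. */
struct my_device {
	struct dma_fence *done_fence;
};

/*
 * Completion-path sketch: the begin/end pair tells lockdep that this
 * code is on the path to signalling a fence, mirroring what
 * drm_sched_main() now does for the whole scheduler thread when
 * sched->fence_signaling is set.
 */
static void my_device_job_done(struct my_device *mdev)
{
	bool cookie = dma_fence_begin_signalling();

	/*
	 * No GFP_KERNEL allocations or other reclaim-blocking waits in
	 * here; lockdep would flag them as potential deadlocks against
	 * anyone waiting on done_fence.
	 */
	dma_fence_signal(mdev->done_fence);

	dma_fence_end_signalling(cookie);
}
```

A driver opts its scheduler thread into the annotation the same way the msm hunk above does: set `sched.fence_signaling = true` before calling `drm_sched_init()`.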