Message ID | 20200313095640.17967-1-l.stach@pengutronix.de (mailing list archive) |
---|---|
State | New, archived |
Series | drm/sched: add run job trace |
On 13.03.20 at 10:56, Lucas Stach wrote:
> From: Robert Beckett <bob.beckett@collabora.com>
>
> Add a new trace event to show when jobs are run on the HW.
>
> Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
> Signed-off-by: Lucas Stach <l.stach@pengutronix.de>

There is also the scheduled fence we could have used for this, but this
trace point adds a few extra fields which might be useful.

Acked-by: Christian König <christian.koenig@amd.com>

> ---
>  .../gpu/drm/scheduler/gpu_scheduler_trace.h | 27 +++++++++++++++++++
>  drivers/gpu/drm/scheduler/sched_main.c      |  1 +
>  2 files changed, 28 insertions(+)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> index d79086498aff..877ce9b127f1 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> @@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
>  		      __entry->job_count, __entry->hw_job_count)
>  );
>
> +TRACE_EVENT(drm_run_job,
> +	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
> +	    TP_ARGS(sched_job, entity),
> +	    TP_STRUCT__entry(
> +			     __field(struct drm_sched_entity *, entity)
> +			     __field(struct dma_fence *, fence)
> +			     __field(const char *, name)
> +			     __field(uint64_t, id)
> +			     __field(u32, job_count)
> +			     __field(int, hw_job_count)
> +			     ),
> +
> +	    TP_fast_assign(
> +			   __entry->entity = entity;
> +			   __entry->id = sched_job->id;
> +			   __entry->fence = &sched_job->s_fence->finished;
> +			   __entry->name = sched_job->sched->name;
> +			   __entry->job_count = spsc_queue_count(&entity->job_queue);
> +			   __entry->hw_job_count = atomic_read(
> +				   &sched_job->sched->hw_rq_count);
> +			   ),
> +	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
> +		      __entry->entity, __entry->id,
> +		      __entry->fence, __entry->name,
> +		      __entry->job_count, __entry->hw_job_count)
> +);
> +
>  TRACE_EVENT(drm_sched_process_job,
>  	    TP_PROTO(struct drm_sched_fence *fence),
>  	    TP_ARGS(fence),
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 71ce6215956f..34231b7163cc 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -773,6 +773,7 @@ static int drm_sched_main(void *param)
>  		atomic_inc(&sched->hw_rq_count);
>  		drm_sched_job_begin(sched_job);
>
> +		trace_drm_run_job(sched_job, entity);
>  		fence = sched->ops->run_job(sched_job);
>  		drm_sched_fence_scheduled(s_fence);
>
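Christian's remark about the scheduled fence refers to &sched_job->s_fence->scheduled, which signals at roughly the moment the new trace event fires. As a rough sketch of that alternative (the foo_* names are hypothetical and not taken from this patch or any driver), an in-kernel consumer could watch that fence with an ordinary dma_fence callback, although it would not see the entity pointer or the queue/HW job counts that drm_run_job records:

#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <drm/gpu_scheduler.h>

struct foo_sched_cb {
	struct dma_fence_cb base;
	struct drm_sched_job *job;
};

/* Runs when the scheduled fence signals, i.e. once the scheduler has
 * handed the job to the hardware. */
static void foo_job_scheduled(struct dma_fence *fence,
			      struct dma_fence_cb *cb)
{
	struct foo_sched_cb *scb = container_of(cb, struct foo_sched_cb, base);

	pr_debug("job %llu handed to ring %s\n",
		 scb->job->id, scb->job->sched->name);
}

/* Register the callback after pushing the job to the entity; if the job
 * was already scheduled, dma_fence_add_callback() returns -ENOENT. */
static void foo_watch_job(struct drm_sched_job *job, struct foo_sched_cb *scb)
{
	scb->job = job;
	if (dma_fence_add_callback(&job->s_fence->scheduled, &scb->base,
				   foo_job_scheduled))
		pr_debug("job %llu already scheduled\n", job->id);
}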
On Fri, Mar 13, 2020 at 6:18 AM Christian König <christian.koenig@amd.com> wrote:
>
> On 13.03.20 at 10:56, Lucas Stach wrote:
> > From: Robert Beckett <bob.beckett@collabora.com>
> >
> > Add a new trace event to show when jobs are run on the HW.
> >
> > Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
> > Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
>
> There is also the scheduled fence we could have used for this, but this trace
> point adds a few extra fields which might be useful.
>
> Acked-by: Christian König <christian.koenig@amd.com>

Applied. thanks!

Alex

> > ---
> >  .../gpu/drm/scheduler/gpu_scheduler_trace.h | 27 +++++++++++++++++++
> >  drivers/gpu/drm/scheduler/sched_main.c      |  1 +
> >  2 files changed, 28 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> > index d79086498aff..877ce9b127f1 100644
> > --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> > @@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
> >  		      __entry->job_count, __entry->hw_job_count)
> >  );
> >
> > +TRACE_EVENT(drm_run_job,
> > +	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
> > +	    TP_ARGS(sched_job, entity),
> > +	    TP_STRUCT__entry(
> > +			     __field(struct drm_sched_entity *, entity)
> > +			     __field(struct dma_fence *, fence)
> > +			     __field(const char *, name)
> > +			     __field(uint64_t, id)
> > +			     __field(u32, job_count)
> > +			     __field(int, hw_job_count)
> > +			     ),
> > +
> > +	    TP_fast_assign(
> > +			   __entry->entity = entity;
> > +			   __entry->id = sched_job->id;
> > +			   __entry->fence = &sched_job->s_fence->finished;
> > +			   __entry->name = sched_job->sched->name;
> > +			   __entry->job_count = spsc_queue_count(&entity->job_queue);
> > +			   __entry->hw_job_count = atomic_read(
> > +				   &sched_job->sched->hw_rq_count);
> > +			   ),
> > +	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
> > +		      __entry->entity, __entry->id,
> > +		      __entry->fence, __entry->name,
> > +		      __entry->job_count, __entry->hw_job_count)
> > +);
> > +
> >  TRACE_EVENT(drm_sched_process_job,
> >  	    TP_PROTO(struct drm_sched_fence *fence),
> >  	    TP_ARGS(fence),
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > index 71ce6215956f..34231b7163cc 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -773,6 +773,7 @@ static int drm_sched_main(void *param)
> >  		atomic_inc(&sched->hw_rq_count);
> >  		drm_sched_job_begin(sched_job);
> >
> > +		trace_drm_run_job(sched_job, entity);
> >  		fence = sched->ops->run_job(sched_job);
> >  		drm_sched_fence_scheduled(s_fence);
> >
> >
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index d79086498aff..877ce9b127f1 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
 		      __entry->job_count, __entry->hw_job_count)
 );

+TRACE_EVENT(drm_run_job,
+	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+	    TP_ARGS(sched_job, entity),
+	    TP_STRUCT__entry(
+			     __field(struct drm_sched_entity *, entity)
+			     __field(struct dma_fence *, fence)
+			     __field(const char *, name)
+			     __field(uint64_t, id)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = entity;
+			   __entry->id = sched_job->id;
+			   __entry->fence = &sched_job->s_fence->finished;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = spsc_queue_count(&entity->job_queue);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->id,
+		      __entry->fence, __entry->name,
+		      __entry->job_count, __entry->hw_job_count)
+);
+
 TRACE_EVENT(drm_sched_process_job,
 	    TP_PROTO(struct drm_sched_fence *fence),
 	    TP_ARGS(fence),
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 71ce6215956f..34231b7163cc 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -773,6 +773,7 @@ static int drm_sched_main(void *param)
 		atomic_inc(&sched->hw_rq_count);
 		drm_sched_job_begin(sched_job);

+		trace_drm_run_job(sched_job, entity);
 		fence = sched->ops->run_job(sched_job);
 		drm_sched_fence_scheduled(s_fence);

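For orientation, the new trace point fires in drm_sched_main() immediately before the scheduler invokes the driver's run_job() hook, so the event marks the hand-off of a job to the hardware ring. A minimal, hypothetical driver-side sketch of that hook follows (the foo_* names and the hw_fence field are invented for illustration and are not part of this patch):

#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical driver-side job, embedding the scheduler job. */
struct foo_job {
	struct drm_sched_job base;
	struct dma_fence *hw_fence;	/* signalled by the HW when the job completes */
};

/* Called by drm_sched_main() right after trace_drm_run_job() has fired. */
static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
{
	struct foo_job *job = container_of(sched_job, struct foo_job, base);

	/* A real driver would write the job into its ring buffer here and
	 * return the hardware fence for the submission. */
	return dma_fence_get(job->hw_fence);
}

static const struct drm_sched_backend_ops foo_sched_ops = {
	.run_job = foo_run_job,
	/* .dependency, .timedout_job and .free_job omitted for brevity */
};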