diff mbox series

[v6,4/7] drm/sched: cleanup gpu_scheduler trace events

Message ID 20241114100113.150647-5-pierre-eric.pelloux-prayer@amd.com (mailing list archive)
State New, archived
Headers show
Series Improve gpu_scheduler trace events + uAPI | expand

Commit Message

Pierre-Eric Pelloux-Prayer Nov. 14, 2024, 10:01 a.m. UTC
A fence uniquely identifies a job, so this commit updates the places
where a kernel pointer was used as an identifier by:

   "fence=(context:%llu, seqno:%lld)"

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
---
 .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++--------
 1 file changed, 22 insertions(+), 17 deletions(-)

Comments

Tvrtko Ursulin Nov. 14, 2024, 11:50 a.m. UTC | #1
On 14/11/2024 10:01, Pierre-Eric Pelloux-Prayer wrote:
> A fence uniquely identify a job, so this commits updates the places
> where a kernel pointer was used as an identifier by:
> 
>     "fence=(context:%llu, seqno:%lld)"

Any special reason for %lld? Planning to use like -1 for unknown or 
something?

Btw in i915 we use fence=%llu:%llu which is more compact and IMO still 
readable so perhaps something to consider.

Regards,

Tvrtko

> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
> ---
>   .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++--------
>   1 file changed, 22 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> index c4ec28540656..24358c4d5bbe 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> @@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
>   	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
>   	    TP_ARGS(sched_job, entity),
>   	    TP_STRUCT__entry(
> -			     __field(struct drm_sched_entity *, entity)
> -			     __field(struct dma_fence *, fence)
>   			     __string(name, sched_job->sched->name)
>   			     __field(uint64_t, id)
>   			     __field(u32, job_count)
>   			     __field(int, hw_job_count)
>   			     __string(dev, dev_name(sched_job->sched->dev))
> +			     __field(uint64_t, fence_context)
> +			     __field(uint64_t, fence_seqno)
>   			     ),
>   
>   	    TP_fast_assign(
> -			   __entry->entity = entity;
>   			   __entry->id = sched_job->id;
> -			   __entry->fence = &sched_job->s_fence->finished;
>   			   __assign_str(name);
>   			   __entry->job_count = spsc_queue_count(&entity->job_queue);
>   			   __entry->hw_job_count = atomic_read(
>   				   &sched_job->sched->credit_count);
>   			   __assign_str(dev);
> +			   __entry->fence_context = sched_job->s_fence->finished.context;
> +			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
> +
>   			   ),
> -	    TP_printk("dev=%s, entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
> -		      __get_str(dev), __entry->entity, __entry->id,
> -		      __entry->fence, __get_str(name),
> +	    TP_printk("dev=%s, id=%llu, fence=(context:%llu, seqno:%lld), ring=%s, job count:%u, hw job count:%d",
> +		      __get_str(dev), __entry->id,
> +		      __entry->fence_context, __entry->fence_seqno, __get_str(name),
>   		      __entry->job_count, __entry->hw_job_count)
>   );
>   
> @@ -75,37 +76,41 @@ TRACE_EVENT(drm_sched_process_job,
>   	    TP_PROTO(struct drm_sched_fence *fence),
>   	    TP_ARGS(fence),
>   	    TP_STRUCT__entry(
> -		    __field(struct dma_fence *, fence)
> +		    __field(uint64_t, fence_context)
> +		    __field(uint64_t, fence_seqno)
>   		    ),
>   
>   	    TP_fast_assign(
> -		    __entry->fence = &fence->finished;
> +		    __entry->fence_context = fence->finished.context;
> +		    __entry->fence_seqno = fence->finished.seqno;
>   		    ),
> -	    TP_printk("fence=%p signaled", __entry->fence)
> +	    TP_printk("fence=(context:%llu, seqno:%lld) signaled",
> +		      __entry->fence_context, __entry->fence_seqno)
>   );
>   
>   TRACE_EVENT(drm_sched_job_wait_dep,
>   	    TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
>   	    TP_ARGS(sched_job, fence),
>   	    TP_STRUCT__entry(
> -			     __string(name, sched_job->sched->name)
> +			     __field(uint64_t, fence_context)
> +			     __field(uint64_t, fence_seqno)
>   			     __field(uint64_t, id)
>   			     __field(struct dma_fence *, fence)
>   			     __field(uint64_t, ctx)
> -			     __field(unsigned, seqno)
> +			     __field(uint64_t, seqno)
>   			     ),
>   
>   	    TP_fast_assign(
> -			   __assign_str(name);
> +			   __entry->fence_context = sched_job->s_fence->finished.context;
> +			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
>   			   __entry->id = sched_job->id;
>   			   __entry->fence = fence;
>   			   __entry->ctx = fence->context;
>   			   __entry->seqno = fence->seqno;
>   			   ),
> -	    TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
> -		      __get_str(name), __entry->id,
> -		      __entry->fence, __entry->ctx,
> -		      __entry->seqno)
> +	    TP_printk("fence=(context:%llu, seqno:%lld), id=%llu, dependencies:{(context:%llu, seqno:%lld)}",
> +		      __entry->fence_context, __entry->fence_seqno, __entry->id,
> +		      __entry->ctx, __entry->seqno)
>   );
>   
>   #endif
Steven Rostedt Nov. 14, 2024, 2:56 p.m. UTC | #2
On Thu, 14 Nov 2024 11:01:07 +0100
Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> wrote:

> A fence uniquely identify a job, so this commits updates the places
> where a kernel pointer was used as an identifier by:
> 
>    "fence=(context:%llu, seqno:%lld)"
> 
> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
> ---
>  .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++--------
>  1 file changed, 22 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> index c4ec28540656..24358c4d5bbe 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> @@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
>  	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
>  	    TP_ARGS(sched_job, entity),
>  	    TP_STRUCT__entry(
> -			     __field(struct drm_sched_entity *, entity)
> -			     __field(struct dma_fence *, fence)
>  			     __string(name, sched_job->sched->name)

The __string() is a 4 byte meta field in the trace event structure. This
means if you have a 64bit (8 byte) field following it, there will likely be
a 4 byte hole between the two fields.

I would suggest swapping the name field with the id field, then you will
have four 4 byte fields in a row (name, job_count, hw_job_count, dev),
which should fill all the holes.

-- Steve


>  			     __field(uint64_t, id)
>  			     __field(u32, job_count)
>  			     __field(int, hw_job_count)
>  			     __string(dev, dev_name(sched_job->sched->dev))
> +			     __field(uint64_t, fence_context)
> +			     __field(uint64_t, fence_seqno)
>  			     ),
>  
>  	    TP_fast_assign(
> -			   __entry->entity = entity;
>  			   __entry->id = sched_job->id;
> -			   __entry->fence = &sched_job->s_fence->finished;
>  			   __assign_str(name);
>  			   __entry->job_count = spsc_queue_count(&entity->job_queue);
>  			   __entry->hw_job_count = atomic_read(
>  				   &sched_job->sched->credit_count);
>  			   __assign_str(dev);
> +			   __entry->fence_context = sched_job->s_fence->finished.context;
> +			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
> +
>  			   ),
> -	    TP_printk("dev=%s, entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
> -		      __get_str(dev), __entry->entity, __entry->id,
> -		      __entry->fence, __get_str(name),
> +	    TP_printk("dev=%s, id=%llu, fence=(context:%llu, seqno:%lld), ring=%s, job count:%u, hw job count:%d",
> +		      __get_str(dev), __entry->id,
> +		      __entry->fence_context, __entry->fence_seqno, __get_str(name),
>  		      __entry->job_count, __entry->hw_job_count)
>  );
>
Philipp Stanner Nov. 15, 2024, 3:16 p.m. UTC | #3
On Thu, 2024-11-14 at 11:01 +0100, Pierre-Eric Pelloux-Prayer wrote:
> A fence uniquely identify a job, so this commits updates the places

s/identify/identifies

> where a kernel pointer was used as an identifier by:

But better sth like this:

"Currently, the scheduler's tracing infrastructure uses the job's
dma_fence and the drm_sched_entity the job belongs to. The dma_fence
alone, however, already uniquely identifies a job.

Use the dma_fence's context and sequence number to exclusively identify
a job in debug prints like so:

> 
>    "fence=(context:%llu, seqno:%lld)"
> 
> Signed-off-by: Pierre-Eric Pelloux-Prayer
> <pierre-eric.pelloux-prayer@amd.com>
> ---
>  .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++------
> --
>  1 file changed, 22 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> index c4ec28540656..24358c4d5bbe 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> @@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
>  	    TP_PROTO(struct drm_sched_job *sched_job, struct
> drm_sched_entity *entity),
>  	    TP_ARGS(sched_job, entity),
>  	    TP_STRUCT__entry(
> -			     __field(struct drm_sched_entity *,
> entity)
> -			     __field(struct dma_fence *, fence)
>  			     __string(name, sched_job->sched->name)
>  			     __field(uint64_t, id)
>  			     __field(u32, job_count)
>  			     __field(int, hw_job_count)
>  			     __string(dev, dev_name(sched_job-
> >sched->dev))
> +			     __field(uint64_t, fence_context)
> +			     __field(uint64_t, fence_seqno)
>  			     ),
>  
>  	    TP_fast_assign(
> -			   __entry->entity = entity;
>  			   __entry->id = sched_job->id;
> -			   __entry->fence = &sched_job->s_fence-
> >finished;
>  			   __assign_str(name);
>  			   __entry->job_count =
> spsc_queue_count(&entity->job_queue);
>  			   __entry->hw_job_count = atomic_read(
>  				   &sched_job->sched->credit_count);
>  			   __assign_str(dev);
> +			   __entry->fence_context = sched_job-
> >s_fence->finished.context;
> +			   __entry->fence_seqno = sched_job-
> >s_fence->finished.seqno;
> +
>  			   ),
> -	    TP_printk("dev=%s, entity=%p, id=%llu, fence=%p,
> ring=%s, job count:%u, hw job count:%d",
> -		      __get_str(dev), __entry->entity, __entry->id,
> -		      __entry->fence, __get_str(name),
> +	    TP_printk("dev=%s, id=%llu, fence=(context:%llu,
> seqno:%lld), ring=%s, job count:%u, hw job count:%d",
> +		      __get_str(dev), __entry->id,
> +		      __entry->fence_context, __entry->fence_seqno,
> __get_str(name),
>  		      __entry->job_count, __entry->hw_job_count)
>  );
>  
> @@ -75,37 +76,41 @@ TRACE_EVENT(drm_sched_process_job,
>  	    TP_PROTO(struct drm_sched_fence *fence),
>  	    TP_ARGS(fence),
>  	    TP_STRUCT__entry(
> -		    __field(struct dma_fence *, fence)
> +		    __field(uint64_t, fence_context)
> +		    __field(uint64_t, fence_seqno)
>  		    ),
>  
>  	    TP_fast_assign(
> -		    __entry->fence = &fence->finished;
> +		    __entry->fence_context = fence-
> >finished.context;
> +		    __entry->fence_seqno = fence->finished.seqno;
>  		    ),
> -	    TP_printk("fence=%p signaled", __entry->fence)
> +	    TP_printk("fence=(context:%llu, seqno:%lld) signaled",
> +		      __entry->fence_context, __entry->fence_seqno)
>  );
>  
>  TRACE_EVENT(drm_sched_job_wait_dep,
>  	    TP_PROTO(struct drm_sched_job *sched_job, struct
> dma_fence *fence),
>  	    TP_ARGS(sched_job, fence),
>  	    TP_STRUCT__entry(
> -			     __string(name, sched_job->sched->name)
> +			     __field(uint64_t, fence_context)
> +			     __field(uint64_t, fence_seqno)
>  			     __field(uint64_t, id)
>  			     __field(struct dma_fence *, fence)
>  			     __field(uint64_t, ctx)
> -			     __field(unsigned, seqno)
> +			     __field(uint64_t, seqno)
>  			     ),
>  
>  	    TP_fast_assign(
> -			   __assign_str(name);
> +			   __entry->fence_context = sched_job-
> >s_fence->finished.context;
> +			   __entry->fence_seqno = sched_job-
> >s_fence->finished.seqno;
>  			   __entry->id = sched_job->id;
>  			   __entry->fence = fence;
>  			   __entry->ctx = fence->context;
>  			   __entry->seqno = fence->seqno;
>  			   ),
> -	    TP_printk("job ring=%s, id=%llu, depends fence=%p,
> context=%llu, seq=%u",
> -		      __get_str(name), __entry->id,
> -		      __entry->fence, __entry->ctx,
> -		      __entry->seqno)
> +	    TP_printk("fence=(context:%llu, seqno:%lld), id=%llu,
> dependencies:{(context:%llu, seqno:%lld)}",
> +		      __entry->fence_context, __entry->fence_seqno,
> __entry->id,
> +		      __entry->ctx, __entry->seqno)
>  );
>  
>  #endif
Pierre-Eric Pelloux-Prayer Nov. 21, 2024, 3:30 p.m. UTC | #4
Le 14/11/2024 à 12:50, Tvrtko Ursulin a écrit :
> 
> On 14/11/2024 10:01, Pierre-Eric Pelloux-Prayer wrote:
>> A fence uniquely identify a job, so this commits updates the places
>> where a kernel pointer was used as an identifier by:
>>
>>     "fence=(context:%llu, seqno:%lld)"
> 
> Any special reason for %lld? Planning to use like -1 for unknown or something?
>

Indeed, %llu is better.


> Btw in i915 we use fence=%llu:%llu which is more compact and IMO still readable so perhaps something 
> to consider.

I'll switch to "fence=%llu:%llu" unless others have concerns.

Thanks,
Pierre-Eric

> 
> Regards,
> 
> Tvrtko
> 
>> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
>> ---
>>   .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++--------
>>   1 file changed, 22 insertions(+), 17 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/ 
>> gpu_scheduler_trace.h
>> index c4ec28540656..24358c4d5bbe 100644
>> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
>> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
>> @@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
>>           TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
>>           TP_ARGS(sched_job, entity),
>>           TP_STRUCT__entry(
>> -                 __field(struct drm_sched_entity *, entity)
>> -                 __field(struct dma_fence *, fence)
>>                    __string(name, sched_job->sched->name)
>>                    __field(uint64_t, id)
>>                    __field(u32, job_count)
>>                    __field(int, hw_job_count)
>>                    __string(dev, dev_name(sched_job->sched->dev))
>> +                 __field(uint64_t, fence_context)
>> +                 __field(uint64_t, fence_seqno)
>>                    ),
>>           TP_fast_assign(
>> -               __entry->entity = entity;
>>                  __entry->id = sched_job->id;
>> -               __entry->fence = &sched_job->s_fence->finished;
>>                  __assign_str(name);
>>                  __entry->job_count = spsc_queue_count(&entity->job_queue);
>>                  __entry->hw_job_count = atomic_read(
>>                      &sched_job->sched->credit_count);
>>                  __assign_str(dev);
>> +               __entry->fence_context = sched_job->s_fence->finished.context;
>> +               __entry->fence_seqno = sched_job->s_fence->finished.seqno;
>> +
>>                  ),
>> -        TP_printk("dev=%s, entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
>> -              __get_str(dev), __entry->entity, __entry->id,
>> -              __entry->fence, __get_str(name),
>> +        TP_printk("dev=%s, id=%llu, fence=(context:%llu, seqno:%lld), ring=%s, job count:%u, hw 
>> job count:%d",
>> +              __get_str(dev), __entry->id,
>> +              __entry->fence_context, __entry->fence_seqno, __get_str(name),
>>                 __entry->job_count, __entry->hw_job_count)
>>   );
>> @@ -75,37 +76,41 @@ TRACE_EVENT(drm_sched_process_job,
>>           TP_PROTO(struct drm_sched_fence *fence),
>>           TP_ARGS(fence),
>>           TP_STRUCT__entry(
>> -            __field(struct dma_fence *, fence)
>> +            __field(uint64_t, fence_context)
>> +            __field(uint64_t, fence_seqno)
>>               ),
>>           TP_fast_assign(
>> -            __entry->fence = &fence->finished;
>> +            __entry->fence_context = fence->finished.context;
>> +            __entry->fence_seqno = fence->finished.seqno;
>>               ),
>> -        TP_printk("fence=%p signaled", __entry->fence)
>> +        TP_printk("fence=(context:%llu, seqno:%lld) signaled",
>> +              __entry->fence_context, __entry->fence_seqno)
>>   );
>>   TRACE_EVENT(drm_sched_job_wait_dep,
>>           TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
>>           TP_ARGS(sched_job, fence),
>>           TP_STRUCT__entry(
>> -                 __string(name, sched_job->sched->name)
>> +                 __field(uint64_t, fence_context)
>> +                 __field(uint64_t, fence_seqno)
>>                    __field(uint64_t, id)
>>                    __field(struct dma_fence *, fence)
>>                    __field(uint64_t, ctx)
>> -                 __field(unsigned, seqno)
>> +                 __field(uint64_t, seqno)
>>                    ),
>>           TP_fast_assign(
>> -               __assign_str(name);
>> +               __entry->fence_context = sched_job->s_fence->finished.context;
>> +               __entry->fence_seqno = sched_job->s_fence->finished.seqno;
>>                  __entry->id = sched_job->id;
>>                  __entry->fence = fence;
>>                  __entry->ctx = fence->context;
>>                  __entry->seqno = fence->seqno;
>>                  ),
>> -        TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
>> -              __get_str(name), __entry->id,
>> -              __entry->fence, __entry->ctx,
>> -              __entry->seqno)
>> +        TP_printk("fence=(context:%llu, seqno:%lld), id=%llu, dependencies:{(context:%llu, seqno: 
>> %lld)}",
>> +              __entry->fence_context, __entry->fence_seqno, __entry->id,
>> +              __entry->ctx, __entry->seqno)
>>   );
>>   #endif
Pierre-Eric Pelloux-Prayer Nov. 21, 2024, 3:31 p.m. UTC | #5
Le 14/11/2024 à 15:56, Steven Rostedt a écrit :
> On Thu, 14 Nov 2024 11:01:07 +0100
> Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> wrote:
> 
>> A fence uniquely identify a job, so this commits updates the places
>> where a kernel pointer was used as an identifier by:
>>
>>     "fence=(context:%llu, seqno:%lld)"
>>
>> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
>> ---
>>   .../gpu/drm/scheduler/gpu_scheduler_trace.h   | 39 +++++++++++--------
>>   1 file changed, 22 insertions(+), 17 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
>> index c4ec28540656..24358c4d5bbe 100644
>> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
>> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
>> @@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
>>   	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
>>   	    TP_ARGS(sched_job, entity),
>>   	    TP_STRUCT__entry(
>> -			     __field(struct drm_sched_entity *, entity)
>> -			     __field(struct dma_fence *, fence)
>>   			     __string(name, sched_job->sched->name)
> 
> The __string() is a 4 byte meta field in the trace event structure. This
> means if you have a 64bit (8 byte) field following it, there will likely be
> a 4 byte hole between the two fields.
> 
> I would suggest swapping the name field with the id field, then you will
> have four 4 byte fields in a row (name, job_count, hw_job_count, dev),
> which should fill all the holes.

Good point, I'll swap them and check there's no hole.

Pierre-Eric

> 
> -- Steve
> 
> 
>>   			     __field(uint64_t, id)
>>   			     __field(u32, job_count)
>>   			     __field(int, hw_job_count)
>>   			     __string(dev, dev_name(sched_job->sched->dev))
>> +			     __field(uint64_t, fence_context)
>> +			     __field(uint64_t, fence_seqno)
>>   			     ),
>>   
>>   	    TP_fast_assign(
>> -			   __entry->entity = entity;
>>   			   __entry->id = sched_job->id;
>> -			   __entry->fence = &sched_job->s_fence->finished;
>>   			   __assign_str(name);
>>   			   __entry->job_count = spsc_queue_count(&entity->job_queue);
>>   			   __entry->hw_job_count = atomic_read(
>>   				   &sched_job->sched->credit_count);
>>   			   __assign_str(dev);
>> +			   __entry->fence_context = sched_job->s_fence->finished.context;
>> +			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
>> +
>>   			   ),
>> -	    TP_printk("dev=%s, entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
>> -		      __get_str(dev), __entry->entity, __entry->id,
>> -		      __entry->fence, __get_str(name),
>> +	    TP_printk("dev=%s, id=%llu, fence=(context:%llu, seqno:%lld), ring=%s, job count:%u, hw job count:%d",
>> +		      __get_str(dev), __entry->id,
>> +		      __entry->fence_context, __entry->fence_seqno, __get_str(name),
>>   		      __entry->job_count, __entry->hw_job_count)
>>   );
>>
diff mbox series

Patch

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index c4ec28540656..24358c4d5bbe 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -36,28 +36,29 @@  DECLARE_EVENT_CLASS(drm_sched_job,
 	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
 	    TP_ARGS(sched_job, entity),
 	    TP_STRUCT__entry(
-			     __field(struct drm_sched_entity *, entity)
-			     __field(struct dma_fence *, fence)
 			     __string(name, sched_job->sched->name)
 			     __field(uint64_t, id)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
 			     __string(dev, dev_name(sched_job->sched->dev))
+			     __field(uint64_t, fence_context)
+			     __field(uint64_t, fence_seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->entity = entity;
 			   __entry->id = sched_job->id;
-			   __entry->fence = &sched_job->s_fence->finished;
 			   __assign_str(name);
 			   __entry->job_count = spsc_queue_count(&entity->job_queue);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->credit_count);
 			   __assign_str(dev);
+			   __entry->fence_context = sched_job->s_fence->finished.context;
+			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+
 			   ),
-	    TP_printk("dev=%s, entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
-		      __get_str(dev), __entry->entity, __entry->id,
-		      __entry->fence, __get_str(name),
+	    TP_printk("dev=%s, id=%llu, fence=(context:%llu, seqno:%lld), ring=%s, job count:%u, hw job count:%d",
+		      __get_str(dev), __entry->id,
+		      __entry->fence_context, __entry->fence_seqno, __get_str(name),
 		      __entry->job_count, __entry->hw_job_count)
 );
 
@@ -75,37 +76,41 @@  TRACE_EVENT(drm_sched_process_job,
 	    TP_PROTO(struct drm_sched_fence *fence),
 	    TP_ARGS(fence),
 	    TP_STRUCT__entry(
-		    __field(struct dma_fence *, fence)
+		    __field(uint64_t, fence_context)
+		    __field(uint64_t, fence_seqno)
 		    ),
 
 	    TP_fast_assign(
-		    __entry->fence = &fence->finished;
+		    __entry->fence_context = fence->finished.context;
+		    __entry->fence_seqno = fence->finished.seqno;
 		    ),
-	    TP_printk("fence=%p signaled", __entry->fence)
+	    TP_printk("fence=(context:%llu, seqno:%lld) signaled",
+		      __entry->fence_context, __entry->fence_seqno)
 );
 
 TRACE_EVENT(drm_sched_job_wait_dep,
 	    TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
 	    TP_ARGS(sched_job, fence),
 	    TP_STRUCT__entry(
-			     __string(name, sched_job->sched->name)
+			     __field(uint64_t, fence_context)
+			     __field(uint64_t, fence_seqno)
 			     __field(uint64_t, id)
 			     __field(struct dma_fence *, fence)
 			     __field(uint64_t, ctx)
-			     __field(unsigned, seqno)
+			     __field(uint64_t, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __assign_str(name);
+			   __entry->fence_context = sched_job->s_fence->finished.context;
+			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
 			   __entry->id = sched_job->id;
 			   __entry->fence = fence;
 			   __entry->ctx = fence->context;
 			   __entry->seqno = fence->seqno;
 			   ),
-	    TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
-		      __get_str(name), __entry->id,
-		      __entry->fence, __entry->ctx,
-		      __entry->seqno)
+	    TP_printk("fence=(context:%llu, seqno:%lld), id=%llu, dependencies:{(context:%llu, seqno:%lld)}",
+		      __entry->fence_context, __entry->fence_seqno, __entry->id,
+		      __entry->ctx, __entry->seqno)
 );
 
 #endif