[v4,1/2] drm/sched: Use drm sched lockdep map for submit_wq

Message ID 20241002131639.3425022-2-matthew.brost@intel.com
State New, archived
Series Use user-defined workqueue lockdep map for drm sched

Commit Message

Matthew Brost Oct. 2, 2024, 1:16 p.m. UTC
Avoid leaking a lockdep map on each drm sched creation and destruction
by using a single lockdep map for all drm sched allocated submit_wq.

v2:
 - Use alloc_ordered_workqueue_lockdep_map (Tejun)

Cc: Luben Tuikov <ltuikov89@gmail.com>
Cc: Christian König <christian.koenig@amd.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
 1 file changed, 11 insertions(+)
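
[Editor's note: for readers skimming the diffstat, the pattern the patch applies is this: with CONFIG_LOCKDEP enabled, each dynamically allocated workqueue carries its own lockdep map, which is effectively leaked over repeated scheduler create/destroy cycles; passing one static struct lockdep_map into alloc_ordered_workqueue_lockdep_map() lets every drm-sched-allocated submit_wq share a single map. A minimal, generic sketch of that pattern follows; the example_* names are illustrative and not part of the patch.]

```c
#include <linux/workqueue.h>
#include <linux/lockdep.h>

#ifdef CONFIG_LOCKDEP
/* One shared map for every workqueue this code allocates. */
static struct lockdep_map example_wq_lockdep_map = {
	.name = "example_wq_lockdep_map"
};
#endif

static struct workqueue_struct *example_alloc_submit_wq(const char *name)
{
#ifdef CONFIG_LOCKDEP
	/* The *_lockdep_map variant is only defined when lockdep is enabled. */
	return alloc_ordered_workqueue_lockdep_map(name, 0,
						   &example_wq_lockdep_map);
#else
	return alloc_ordered_workqueue(name, 0);
#endif
}
```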

Comments

Nirmoy Das Oct. 2, 2024, 2:14 p.m. UTC | #1
On 10/2/2024 3:16 PM, Matthew Brost wrote:
> Avoid leaking a lockdep map on each drm sched creation and destruction
> by using a single lockdep map for all drm sched allocated submit_wq.
>
> v2:
>  - Use alloc_ordered_workqueue_lockdep_map (Tejun)
>
> Cc: Luben Tuikov <ltuikov89@gmail.com>
> Cc: Christian König <christian.koenig@amd.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>

LGTM

Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>

> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
>  1 file changed, 11 insertions(+)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 36db5c7736fc..e32b0f7d7e94 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -87,6 +87,12 @@
>  #define CREATE_TRACE_POINTS
>  #include "gpu_scheduler_trace.h"
>  
> +#ifdef CONFIG_LOCKDEP
> +static struct lockdep_map drm_sched_lockdep_map = {
> +	.name = "drm_sched_lockdep_map"
> +};
> +#endif
> +
>  #define to_drm_sched_job(sched_job)		\
>  		container_of((sched_job), struct drm_sched_job, queue_node)
>  
> @@ -1270,7 +1276,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>  		sched->submit_wq = submit_wq;
>  		sched->own_submit_wq = false;
>  	} else {
> +#ifdef CONFIG_LOCKDEP
> +		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
> +								       &drm_sched_lockdep_map);
> +#else
>  		sched->submit_wq = alloc_ordered_workqueue(name, 0);
> +#endif
>  		if (!sched->submit_wq)
>  			return -ENOMEM;
>
Danilo Krummrich Oct. 2, 2024, 2:16 p.m. UTC | #2
On Wed, Oct 02, 2024 at 06:16:38AM -0700, Matthew Brost wrote:
> Avoid leaking a lockdep map on each drm sched creation and destruction
> by using a single lockdep map for all drm sched allocated submit_wq.
> 
> v2:
>  - Use alloc_ordered_workqueue_lockdep_map (Tejun)
> 
> Cc: Luben Tuikov <ltuikov89@gmail.com>
> Cc: Christian König <christian.koenig@amd.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>

Good catch,

Acked-by: Danilo Krummrich <dakr@kernel.org>

> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
>  1 file changed, 11 insertions(+)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 36db5c7736fc..e32b0f7d7e94 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -87,6 +87,12 @@
>  #define CREATE_TRACE_POINTS
>  #include "gpu_scheduler_trace.h"
>  
> +#ifdef CONFIG_LOCKDEP
> +static struct lockdep_map drm_sched_lockdep_map = {
> +	.name = "drm_sched_lockdep_map"
> +};
> +#endif
> +
>  #define to_drm_sched_job(sched_job)		\
>  		container_of((sched_job), struct drm_sched_job, queue_node)
>  
> @@ -1270,7 +1276,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>  		sched->submit_wq = submit_wq;
>  		sched->own_submit_wq = false;
>  	} else {
> +#ifdef CONFIG_LOCKDEP
> +		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
> +								       &drm_sched_lockdep_map);
> +#else
>  		sched->submit_wq = alloc_ordered_workqueue(name, 0);
> +#endif
>  		if (!sched->submit_wq)
>  			return -ENOMEM;
>  
> -- 
> 2.34.1
>
Matthew Brost Oct. 2, 2024, 3 p.m. UTC | #3
On Wed, Oct 02, 2024 at 04:16:00PM +0200, Danilo Krummrich wrote:
> On Wed, Oct 02, 2024 at 06:16:38AM -0700, Matthew Brost wrote:
> > Avoid leaking a lockdep map on each drm sched creation and destruction
> > by using a single lockdep map for all drm sched allocated submit_wq.
> > 
> > v2:
> >  - Use alloc_ordered_workqueue_lockdep_map (Tejun)
> > 
> > Cc: Luben Tuikov <ltuikov89@gmail.com>
> > Cc: Christian König <christian.koenig@amd.com>
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> 
> Good catch,
> 

Thanks. I'm likely to merge this patch through drm-xe-next rather than
drm-misc, assuming it is ok with the drm-misc maintainers. I've pinged
Maarten about this.

Any objection on your end if I do this?

Matt

> Acked-by: Danilo Krummrich <dakr@kernel.org>
> 
> > ---
> >  drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
> >  1 file changed, 11 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > index 36db5c7736fc..e32b0f7d7e94 100644
> > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > @@ -87,6 +87,12 @@
> >  #define CREATE_TRACE_POINTS
> >  #include "gpu_scheduler_trace.h"
> >  
> > +#ifdef CONFIG_LOCKDEP
> > +static struct lockdep_map drm_sched_lockdep_map = {
> > +	.name = "drm_sched_lockdep_map"
> > +};
> > +#endif
> > +
> >  #define to_drm_sched_job(sched_job)		\
> >  		container_of((sched_job), struct drm_sched_job, queue_node)
> >  
> > @@ -1270,7 +1276,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> >  		sched->submit_wq = submit_wq;
> >  		sched->own_submit_wq = false;
> >  	} else {
> > +#ifdef CONFIG_LOCKDEP
> > +		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
> > +								       &drm_sched_lockdep_map);
> > +#else
> >  		sched->submit_wq = alloc_ordered_workqueue(name, 0);
> > +#endif
> >  		if (!sched->submit_wq)
> >  			return -ENOMEM;
> >  
> > -- 
> > 2.34.1
> >
Danilo Krummrich Oct. 2, 2024, 3:13 p.m. UTC | #4
On Wed, Oct 02, 2024 at 03:00:08PM +0000, Matthew Brost wrote:
> On Wed, Oct 02, 2024 at 04:16:00PM +0200, Danilo Krummrich wrote:
> > On Wed, Oct 02, 2024 at 06:16:38AM -0700, Matthew Brost wrote:
> > > Avoid leaking a lockdep map on each drm sched creation and destruction
> > > by using a single lockdep map for all drm sched allocated submit_wq.
> > > 
> > > v2:
> > >  - Use alloc_ordered_workqueue_lockdep_map (Tejun)
> > > 
> > > Cc: Luben Tuikov <ltuikov89@gmail.com>
> > > Cc: Christian König <christian.koenig@amd.com>
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > 
> > Good catch,
> > 
> 
> Thanks. I'm likely to merge this patch through drm-xe-next rather than
> drm-misc assuming it is ok with the drm-misc maintainers. I've pinged
> Maarten about this.
> 
> Any objection on your end if I do this?

No, I think that makes sense.

> 
> Matt
> 
> > Acked-by: Danilo Krummrich <dakr@kernel.org>
> > 
> > > ---
> > >  drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
> > >  1 file changed, 11 insertions(+)
> > > 
> > > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > > index 36db5c7736fc..e32b0f7d7e94 100644
> > > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > > @@ -87,6 +87,12 @@
> > >  #define CREATE_TRACE_POINTS
> > >  #include "gpu_scheduler_trace.h"
> > >  
> > > +#ifdef CONFIG_LOCKDEP
> > > +static struct lockdep_map drm_sched_lockdep_map = {
> > > +	.name = "drm_sched_lockdep_map"
> > > +};
> > > +#endif
> > > +
> > >  #define to_drm_sched_job(sched_job)		\
> > >  		container_of((sched_job), struct drm_sched_job, queue_node)
> > >  
> > > @@ -1270,7 +1276,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> > >  		sched->submit_wq = submit_wq;
> > >  		sched->own_submit_wq = false;
> > >  	} else {
> > > +#ifdef CONFIG_LOCKDEP
> > > +		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
> > > +								       &drm_sched_lockdep_map);
> > > +#else
> > >  		sched->submit_wq = alloc_ordered_workqueue(name, 0);
> > > +#endif
> > >  		if (!sched->submit_wq)
> > >  			return -ENOMEM;
> > >  
> > > -- 
> > > 2.34.1
> > > 
>
Ghimiray, Himal Prasad Oct. 3, 2024, 3:29 a.m. UTC | #5
On 02-10-2024 18:46, Matthew Brost wrote:
> Avoid leaking a lockdep map on each drm sched creation and destruction
> by using a single lockdep map for all drm sched allocated submit_wq.
> 
> v2:
>   - Use alloc_ordered_workqueue_lockdep_map (Tejun)
> 
> Cc: Luben Tuikov <ltuikov89@gmail.com>
> Cc: Christian König <christian.koenig@amd.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
>   1 file changed, 11 insertions(+)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 36db5c7736fc..e32b0f7d7e94 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -87,6 +87,12 @@
>   #define CREATE_TRACE_POINTS
>   #include "gpu_scheduler_trace.h"
>   
> +#ifdef CONFIG_LOCKDEP
> +static struct lockdep_map drm_sched_lockdep_map = {
> +	.name = "drm_sched_lockdep_map"
> +};
> +#endif
> +
>   #define to_drm_sched_job(sched_job)		\
>   		container_of((sched_job), struct drm_sched_job, queue_node)
>   
> @@ -1270,7 +1276,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>   		sched->submit_wq = submit_wq;
>   		sched->own_submit_wq = false;
>   	} else {
> +#ifdef CONFIG_LOCKDEP
> +		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
> +								       &drm_sched_lockdep_map);
> +#else
>   		sched->submit_wq = alloc_ordered_workqueue(name, 0);
> +#endif

LGTM.
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>

>   		if (!sched->submit_wq)
>   			return -ENOMEM;
>

Patch

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 36db5c7736fc..e32b0f7d7e94 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -87,6 +87,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+	.name = "drm_sched_lockdep_map"
+};
+#endif
+
 #define to_drm_sched_job(sched_job)		\
 		container_of((sched_job), struct drm_sched_job, queue_node)
 
@@ -1270,7 +1276,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		sched->submit_wq = submit_wq;
 		sched->own_submit_wq = false;
 	} else {
+#ifdef CONFIG_LOCKDEP
+		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
+								       &drm_sched_lockdep_map);
+#else
 		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#endif
 		if (!sched->submit_wq)
 			return -ENOMEM;
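
[Editor's note: a hedged caller-side sketch of what this change covers: a driver that passes submit_wq = NULL to drm_sched_init() gets a scheduler-allocated ordered workqueue, which after this patch uses the shared drm_sched_lockdep_map rather than a per-queue lockdep map. The drm_sched_init() prototype below reflects the scheduler around the time of this series and may differ on other kernel versions; the my_* names and the numeric values are placeholders, not from any real driver.]

```c
#include <drm/gpu_scheduler.h>
#include <linux/jiffies.h>

/* Placeholder ops; .run_job/.timedout_job would be filled in elsewhere. */
static const struct drm_sched_backend_ops my_sched_ops;

static int my_sched_create(struct drm_gpu_scheduler *sched, struct device *dev)
{
	/*
	 * submit_wq == NULL: drm_sched_init() allocates the ordered
	 * workqueue itself, so it is the path affected by this patch.
	 */
	return drm_sched_init(sched, &my_sched_ops,
			      NULL,			/* submit_wq */
			      DRM_SCHED_PRIORITY_COUNT,	/* num_rqs */
			      64,			/* credit_limit */
			      0,			/* hang_limit */
			      msecs_to_jiffies(5000),	/* timeout */
			      NULL,			/* timeout_wq */
			      NULL,			/* score */
			      "my-sched", dev);
}
```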