[2/2] rcu: Allocate WQ with WQ_MEM_RECLAIM bit set

Message ID 20240305195720.42687-2-urezki@gmail.com (mailing list archive)
State New
Series [1/2] rcu: Do not release a wait-head from a GP kthread

Commit Message

Uladzislau Rezki March 5, 2024, 7:57 p.m. UTC
synchronize_rcu() users have to be processed regardless
of memory pressure, so our private WQ needs to have at
least one execution context, which the WQ_MEM_RECLAIM
flag guarantees.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 kernel/rcu/tree.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
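
For background: WQ_MEM_RECLAIM makes the workqueue pre-allocate a dedicated
rescuer thread, so queued work can always make forward progress even when
the kernel cannot spawn new kworkers under memory pressure. A minimal usage
sketch (illustrative only; demo_wq, demo_fn and demo_work are hypothetical
names, not part of this patch):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;

	static void demo_fn(struct work_struct *work)
	{
		/* This handler is guaranteed an execution context
		 * (the rescuer thread) even under memory pressure. */
	}
	static DECLARE_WORK(demo_work, demo_fn);

	static int __init demo_init(void)
	{
		/* WQ_MEM_RECLAIM reserves a rescuer thread for this WQ. */
		demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
		if (!demo_wq)
			return -ENOMEM;

		queue_work(demo_wq, &demo_work);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		destroy_workqueue(demo_wq);
	}
	module_init(demo_init);
	module_exit(demo_exit);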

Comments

Zqiang March 6, 2024, 2:15 a.m. UTC | #1
>
> synchronize_rcu() users have to be processed regardless
> of memory pressure, so our private WQ needs to have at
> least one execution context, which the WQ_MEM_RECLAIM
> flag guarantees.
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  kernel/rcu/tree.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 475647620b12..59881a68dd26 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -1581,6 +1581,7 @@ static void rcu_sr_put_wait_head(struct llist_node *node)
>  /* Disabled by default. */
>  static int rcu_normal_wake_from_gp;
>  module_param(rcu_normal_wake_from_gp, int, 0644);
> +static struct workqueue_struct *sync_wq;
>
>  static void rcu_sr_normal_complete(struct llist_node *node)
>  {
> @@ -1679,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup(void)
>          * of outstanding users(if still left) and releasing wait-heads
>          * added by rcu_sr_normal_gp_init() call.
>          */
> -       queue_work(system_highpri_wq, &rcu_state.srs_cleanup_work);
> +       queue_work(sync_wq, &rcu_state.srs_cleanup_work);
>  }
>
>  /*
> @@ -5584,6 +5585,9 @@ void __init rcu_init(void)
>         rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
>         WARN_ON(!rcu_gp_wq);
>
> +       sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);

Why was WQ_HIGHPRI removed?

Thanks
Zqiang


> +       WARN_ON(!sync_wq);
> +
>         /* Fill in default value for rcutree.qovld boot parameter. */
>         /* -After- the rcu_node ->lock fields are initialized! */
>         if (qovld < 0)
> --
> 2.39.2
>
>
Uladzislau Rezki March 6, 2024, 11:56 a.m. UTC | #2
On Wed, Mar 06, 2024 at 10:15:44AM +0800, Z qiang wrote:
> >
> > synchronize_rcu() users have to be processed regardless
> > of memory pressure, so our private WQ needs to have at
> > least one execution context, which the WQ_MEM_RECLAIM
> > flag guarantees.
> >
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > ---
> >  kernel/rcu/tree.c | 6 +++++-
> >  1 file changed, 5 insertions(+), 1 deletion(-)
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 475647620b12..59881a68dd26 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -1581,6 +1581,7 @@ static void rcu_sr_put_wait_head(struct llist_node *node)
> >  /* Disabled by default. */
> >  static int rcu_normal_wake_from_gp;
> >  module_param(rcu_normal_wake_from_gp, int, 0644);
> > +static struct workqueue_struct *sync_wq;
> >
> >  static void rcu_sr_normal_complete(struct llist_node *node)
> >  {
> > @@ -1679,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup(void)
> >          * of outstanding users(if still left) and releasing wait-heads
> >          * added by rcu_sr_normal_gp_init() call.
> >          */
> > -       queue_work(system_highpri_wq, &rcu_state.srs_cleanup_work);
> > +       queue_work(sync_wq, &rcu_state.srs_cleanup_work);
> >  }
> >
> >  /*
> > @@ -5584,6 +5585,9 @@ void __init rcu_init(void)
> >         rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
> >         WARN_ON(!rcu_gp_wq);
> >
> > +       sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
> 
> Why was WQ_HIGHPRI removed?
> 
I would like to check the performance figures with it and
send it out as a separate patch if it is worth it.
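
For illustration, restoring the flag later would be a one-line change along
these lines (a hypothetical follow-up sketch, not a posted patch):

	-	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
	+	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);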

--
Uladzislau Rezki
Joel Fernandes March 6, 2024, 5:57 p.m. UTC | #3
On 3/6/2024 6:56 AM, Uladzislau Rezki wrote:
> On Wed, Mar 06, 2024 at 10:15:44AM +0800, Z qiang wrote:
>>>
>>> synchronize_rcu() users have to be processed regardless
>>> of memory pressure, so our private WQ needs to have at
>>> least one execution context, which the WQ_MEM_RECLAIM
>>> flag guarantees.
>>>
>>> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
>>> ---
>>>  kernel/rcu/tree.c | 6 +++++-
>>>  1 file changed, 5 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
>>> index 475647620b12..59881a68dd26 100644
>>> --- a/kernel/rcu/tree.c
>>> +++ b/kernel/rcu/tree.c
>>> @@ -1581,6 +1581,7 @@ static void rcu_sr_put_wait_head(struct llist_node *node)
>>>  /* Disabled by default. */
>>>  static int rcu_normal_wake_from_gp;
>>>  module_param(rcu_normal_wake_from_gp, int, 0644);
>>> +static struct workqueue_struct *sync_wq;
>>>
>>>  static void rcu_sr_normal_complete(struct llist_node *node)
>>>  {
>>> @@ -1679,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup(void)
>>>          * of outstanding users(if still left) and releasing wait-heads
>>>          * added by rcu_sr_normal_gp_init() call.
>>>          */
>>> -       queue_work(system_highpri_wq, &rcu_state.srs_cleanup_work);
>>> +       queue_work(sync_wq, &rcu_state.srs_cleanup_work);
>>>  }
>>>
>>>  /*
>>> @@ -5584,6 +5585,9 @@ void __init rcu_init(void)
>>>         rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
>>>         WARN_ON(!rcu_gp_wq);
>>>
>>> +       sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
>>
>> Why was WQ_HIGHPRI removed?
>>
> I would like to check perf. figures with it and send out it as a
> separate patch if it is worth it.

I guess one thing to note is that there are also other RCU-related WQs that
have WQ_MEM_RECLAIM but not WQ_HIGHPRI (such as for expedited RCU, at least
in some configs). So for consistency, this makes sense to me.

Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>

thanks,

 - Joel
Uladzislau Rezki March 7, 2024, 12:16 p.m. UTC | #4
On Wed, Mar 06, 2024 at 12:57:25PM -0500, Joel Fernandes wrote:
> 
> 
> On 3/6/2024 6:56 AM, Uladzislau Rezki wrote:
> > On Wed, Mar 06, 2024 at 10:15:44AM +0800, Z qiang wrote:
> >>>
> >>> synchronize_rcu() users have to be processed regardless
> >>> of memory pressure, so our private WQ needs to have at
> >>> least one execution context, which the WQ_MEM_RECLAIM
> >>> flag guarantees.
> >>>
> >>> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> >>> ---
> >>>  kernel/rcu/tree.c | 6 +++++-
> >>>  1 file changed, 5 insertions(+), 1 deletion(-)
> >>>
> >>> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> >>> index 475647620b12..59881a68dd26 100644
> >>> --- a/kernel/rcu/tree.c
> >>> +++ b/kernel/rcu/tree.c
> >>> @@ -1581,6 +1581,7 @@ static void rcu_sr_put_wait_head(struct llist_node *node)
> >>>  /* Disabled by default. */
> >>>  static int rcu_normal_wake_from_gp;
> >>>  module_param(rcu_normal_wake_from_gp, int, 0644);
> >>> +static struct workqueue_struct *sync_wq;
> >>>
> >>>  static void rcu_sr_normal_complete(struct llist_node *node)
> >>>  {
> >>> @@ -1679,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup(void)
> >>>          * of outstanding users(if still left) and releasing wait-heads
> >>>          * added by rcu_sr_normal_gp_init() call.
> >>>          */
> >>> -       queue_work(system_highpri_wq, &rcu_state.srs_cleanup_work);
> >>> +       queue_work(sync_wq, &rcu_state.srs_cleanup_work);
> >>>  }
> >>>
> >>>  /*
> >>> @@ -5584,6 +5585,9 @@ void __init rcu_init(void)
> >>>         rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
> >>>         WARN_ON(!rcu_gp_wq);
> >>>
> >>> +       sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
> >>
> >> Why was WQ_HIGHPRI removed?
> >>
> > I would like to check the performance figures with it and
> > send it out as a separate patch if it is worth it.
> 
> I guess one thing to note is that there are also other RCU-related WQs that
> have WQ_MEM_RECLAIM but not WQ_HIGHPRI (such as for expedited RCU, at least
> in some configs). So for consistency, this makes sense to me.
> 
> Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> 
Thanks. I will update it with your review tag!

--
Uladzislau Rezki

Patch

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 475647620b12..59881a68dd26 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1581,6 +1581,7 @@  static void rcu_sr_put_wait_head(struct llist_node *node)
 /* Disabled by default. */
 static int rcu_normal_wake_from_gp;
 module_param(rcu_normal_wake_from_gp, int, 0644);
+static struct workqueue_struct *sync_wq;
 
 static void rcu_sr_normal_complete(struct llist_node *node)
 {
@@ -1679,7 +1680,7 @@  static void rcu_sr_normal_gp_cleanup(void)
 	 * of outstanding users(if still left) and releasing wait-heads
 	 * added by rcu_sr_normal_gp_init() call.
 	 */
-	queue_work(system_highpri_wq, &rcu_state.srs_cleanup_work);
+	queue_work(sync_wq, &rcu_state.srs_cleanup_work);
 }
 
 /*
@@ -5584,6 +5585,9 @@  void __init rcu_init(void)
 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
 	WARN_ON(!rcu_gp_wq);
 
+	sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
+	WARN_ON(!sync_wq);
+
 	/* Fill in default value for rcutree.qovld boot parameter. */
 	/* -After- the rcu_node ->lock fields are initialized! */
 	if (qovld < 0)