[v2,4/5] riscv: add lazy preempt support

Message ID 20220831175920.2806-5-jszhang@kernel.org (mailing list archive)
State New, archived
Series riscv: add PREEMPT_RT support

Commit Message

Jisheng Zhang Aug. 31, 2022, 5:59 p.m. UTC
Implement lazy preempt support for riscv.

Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
---
 arch/riscv/Kconfig                   | 1 +
 arch/riscv/include/asm/thread_info.h | 7 +++++--
 arch/riscv/kernel/asm-offsets.c      | 1 +
 arch/riscv/kernel/entry.S            | 9 +++++++--
 4 files changed, 14 insertions(+), 4 deletions(-)

Comments

Guo Ren Sept. 4, 2022, 3:16 p.m. UTC | #1
On Thu, Sep 1, 2022 at 2:08 AM Jisheng Zhang <jszhang@kernel.org> wrote:
>
> Implement lazy preempt support for riscv.
>
> Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
> ---
>  arch/riscv/Kconfig                   | 1 +
>  arch/riscv/include/asm/thread_info.h | 7 +++++--
>  arch/riscv/kernel/asm-offsets.c      | 1 +
>  arch/riscv/kernel/entry.S            | 9 +++++++--
>  4 files changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 7a8134fd7ec9..9f2f1936b1b5 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -105,6 +105,7 @@ config RISCV
>         select HAVE_PERF_REGS
>         select HAVE_PERF_USER_STACK_DUMP
>         select HAVE_POSIX_CPU_TIMERS_TASK_WORK
> +       select HAVE_PREEMPT_LAZY
>         select HAVE_REGS_AND_STACK_ACCESS_API
>         select HAVE_FUNCTION_ARG_ACCESS_API
>         select HAVE_STACKPROTECTOR
> diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
> index 78933ac04995..471915b179a2 100644
> --- a/arch/riscv/include/asm/thread_info.h
> +++ b/arch/riscv/include/asm/thread_info.h
> @@ -56,6 +56,7 @@
>  struct thread_info {
>         unsigned long           flags;          /* low level flags */
>         int                     preempt_count;  /* 0=>preemptible, <0=>BUG */
> +       int                     preempt_lazy_count;  /* 0=>preemptible, <0=>BUG */
>         /*
>          * These stack pointers are overwritten on every system call or
>          * exception.  SP is also saved to the stack it can be recovered when
> @@ -90,7 +91,7 @@ struct thread_info {
>  #define TIF_NOTIFY_RESUME      1       /* callback before returning to user */
>  #define TIF_SIGPENDING         2       /* signal pending */
>  #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
> -#define TIF_RESTORE_SIGMASK    4       /* restore signal mask in do_signal() */
> +#define TIF_NEED_RESCHED_LAZY  4       /* lazy rescheduling */
>  #define TIF_MEMDIE             5       /* is terminating due to OOM killer */
>  #define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
>  #define TIF_SYSCALL_AUDIT      7       /* syscall auditing */
> @@ -98,6 +99,7 @@ struct thread_info {
>  #define TIF_NOTIFY_SIGNAL      9       /* signal notifications exist */
>  #define TIF_UPROBE             10      /* uprobe breakpoint or singlestep */
>  #define TIF_32BIT              11      /* compat-mode 32bit process */
> +#define TIF_RESTORE_SIGMASK    12      /* restore signal mask in do_signal() */
>
>  #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
>  #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
> @@ -108,10 +110,11 @@ struct thread_info {
>  #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
>  #define _TIF_NOTIFY_SIGNAL     (1 << TIF_NOTIFY_SIGNAL)
>  #define _TIF_UPROBE            (1 << TIF_UPROBE)
> +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
>
>  #define _TIF_WORK_MASK \
>         (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
> -        _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
> +        _TIF_NEED_RESCHED_LAZY | _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
>
>  #define _TIF_SYSCALL_WORK \
>         (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
> diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
> index df9444397908..e38e33822f72 100644
> --- a/arch/riscv/kernel/asm-offsets.c
> +++ b/arch/riscv/kernel/asm-offsets.c
> @@ -35,6 +35,7 @@ void asm_offsets(void)
>         OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
>         OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
>         OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
> +       OFFSET(TASK_TI_PREEMPT_LAZY_COUNT, task_struct, thread_info.preempt_lazy_count);
>         OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
>         OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
>
> diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> index b9eda3fcbd6d..595100a4c2c7 100644
> --- a/arch/riscv/kernel/entry.S
> +++ b/arch/riscv/kernel/entry.S
> @@ -361,9 +361,14 @@ restore_all:
>  resume_kernel:
>         REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
>         bnez s0, restore_all
> -       REG_L s0, TASK_TI_FLAGS(tp)
> -       andi s0, s0, _TIF_NEED_RESCHED
> +       REG_L s1, TASK_TI_FLAGS(tp)
> +       andi s0, s1, _TIF_NEED_RESCHED
> +       bnez s0, 1f
> +       REG_L s0, TASK_TI_PREEMPT_LAZY_COUNT(tp)
> +       bnez s0, restore_all
> +       andi s0, s1, _TIF_NEED_RESCHED_LAZY
Can you tell me who increases/decreases PREEMPT_LAZY_COUNT, and
who sets NEED_RESCHED_LAZY?


>         beqz s0, restore_all
> +1:
>         call preempt_schedule_irq
>         j restore_all
>  #endif
> --
> 2.34.1
Sebastian Andrzej Siewior Sept. 5, 2022, 6:34 a.m. UTC | #2
On 2022-09-04 23:16:12 [+0800], Guo Ren wrote:
> > diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> > index b9eda3fcbd6d..595100a4c2c7 100644
> > --- a/arch/riscv/kernel/entry.S
> > +++ b/arch/riscv/kernel/entry.S
> > @@ -361,9 +361,14 @@ restore_all:
> >  resume_kernel:
> >         REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
> >         bnez s0, restore_all
> > -       REG_L s0, TASK_TI_FLAGS(tp)
> > -       andi s0, s0, _TIF_NEED_RESCHED
> > +       REG_L s1, TASK_TI_FLAGS(tp)
> > +       andi s0, s1, _TIF_NEED_RESCHED
> > +       bnez s0, 1f
> > +       REG_L s0, TASK_TI_PREEMPT_LAZY_COUNT(tp)
> > +       bnez s0, restore_all
> > +       andi s0, s1, _TIF_NEED_RESCHED_LAZY
> Can you tell me who increases/decreases PREEMPT_LAZY_COUNT, and
> who sets NEED_RESCHED_LAZY?

There is "generic" code in the PREEMPT_RT patch doing that. The counter
is incremented/ decremented via preempt_lazy_enable()/disable() and one
of the user is migrate_disable()/enable().
Basically if a task is task_is_realtime() then NEED_RESCHED is set for
the wakeup. For the remaining states (SCHED_OTHER, …) NEED_RESCHED_LAZY
is set for the wakeup. This can be delayed if the task is in a "preempt
disable lazy" section (similar to a preempt_disable() section) but a
task_is_realtime() can still be scheduled if needed.
See details at
	https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git/plain/patches/sched__Add_support_for_lazy_preemption.patch?h=linux-6.0.y-rt-patches

Sebastian
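
A minimal C sketch of what Sebastian describes, with the helper names
taken from the linked RT patch; the wakeup-side function below is a
hypothetical condensation (the real logic lives in the scheduler's
resched paths), not the exact RT-tree code:

	/* Sketch only -- simplified from the RT lazy-preemption patch. */
	static inline void preempt_lazy_disable(void)
	{
		/* enter a section during which lazy preemption is held off */
		current_thread_info()->preempt_lazy_count++;
		barrier();
	}

	static inline void preempt_lazy_enable(void)
	{
		barrier();
		current_thread_info()->preempt_lazy_count--;
		/* the real helper also folds a pending lazy resched here */
	}

	/* Hypothetical wakeup-side condensation: which flag gets set? */
	static void resched_curr_sketch(struct rq *rq)
	{
		if (task_is_realtime(rq->curr))
			set_tsk_need_resched(rq->curr);	/* TIF_NEED_RESCHED */
		else
			set_tsk_thread_flag(rq->curr,
					    TIF_NEED_RESCHED_LAZY); /* deferred */
	}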
Guo Ren Sept. 5, 2022, 8:33 a.m. UTC | #3
On Mon, Sep 5, 2022 at 2:34 PM Sebastian Andrzej Siewior
<bigeasy@linutronix.de> wrote:
>
> On 2022-09-04 23:16:12 [+0800], Guo Ren wrote:
> > > diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> > > index b9eda3fcbd6d..595100a4c2c7 100644
> > > --- a/arch/riscv/kernel/entry.S
> > > +++ b/arch/riscv/kernel/entry.S
> > > @@ -361,9 +361,14 @@ restore_all:
> > >  resume_kernel:
> > >         REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
> > >         bnez s0, restore_all
> > > -       REG_L s0, TASK_TI_FLAGS(tp)
> > > -       andi s0, s0, _TIF_NEED_RESCHED
> > > +       REG_L s1, TASK_TI_FLAGS(tp)
> > > +       andi s0, s1, _TIF_NEED_RESCHED
> > > +       bnez s0, 1f
> > > +       REG_L s0, TASK_TI_PREEMPT_LAZY_COUNT(tp)
> > > +       bnez s0, restore_all
> > > +       andi s0, s1, _TIF_NEED_RESCHED_LAZY
> > Can you tell me who increases/decreases PREEMPT_LAZY_COUNT, and
> > who sets NEED_RESCHED_LAZY?
>
> There is "generic" code in the PREEMPT_RT patch doing that. The counter
> is incremented/ decremented via preempt_lazy_enable()/disable() and one
> of the user is migrate_disable()/enable().
> Basically if a task is task_is_realtime() then NEED_RESCHED is set for
> the wakeup. For the remaining states (SCHED_OTHER, …) NEED_RESCHED_LAZY
> is set for the wakeup. This can be delayed if the task is in a "preempt
> disable lazy" section (similar to a preempt_disable() section) but a
> task_is_realtime() can still be scheduled if needed.
Okay, it should be [PATCH RT]. RISC-V would also move to GENERIC_ENTRY
[1], so the above assembly code would be replaced by the generic one, right?

[1]: https://lore.kernel.org/linux-riscv/20220904072637.8619-3-guoren@kernel.org/T/#u


> See details at
>         https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git/plain/patches/sched__Add_support_for_lazy_preemption.patch?h=linux-6.0.y-rt-patches


>
> Sebastian
Sebastian Andrzej Siewior Sept. 5, 2022, 8:46 a.m. UTC | #4
On 2022-09-05 16:33:54 [+0800], Guo Ren wrote:
> > There is "generic" code in the PREEMPT_RT patch doing that. The counter
> > is incremented/ decremented via preempt_lazy_enable()/disable() and one
> > of the user is migrate_disable()/enable().
> > Basically if a task is task_is_realtime() then NEED_RESCHED is set for
> > the wakeup. For the remaining states (SCHED_OTHER, …) NEED_RESCHED_LAZY
> > is set for the wakeup. This can be delayed if the task is in a "preempt
> > disable lazy" section (similar to a preempt_disable() section) but a
> > task_is_realtime() can still be scheduled if needed.
> Okay, it should be [PATCH RT]. RISC-V would also move to GENERIC_ENTRY
> [1], so the above assembly code would be replaced by the generic one, right?

correct.

Sebastian
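
For context on what "replaced by the generic one" means: the
generic-entry counterpart of this hand-written assembly lives in
kernel/entry/common.c. Roughly (simplified from the upstream helper;
debug checks elided):

	void irqentry_exit_cond_resched(void)
	{
		if (!preempt_count()) {		/* preemption allowed? */
			if (need_resched())	/* TIF_NEED_RESCHED set? */
				preempt_schedule_irq();
		}
	}

The RT tree extends this exit path with the preempt_lazy_count and
TIF_NEED_RESCHED_LAZY checks, mirroring the assembly in this patch.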
Jisheng Zhang Sept. 5, 2022, 12:58 p.m. UTC | #5
On Mon, Sep 05, 2022 at 04:33:54PM +0800, Guo Ren wrote:
> On Mon, Sep 5, 2022 at 2:34 PM Sebastian Andrzej Siewior
> <bigeasy@linutronix.de> wrote:
> >
> > On 2022-09-04 23:16:12 [+0800], Guo Ren wrote:
> > > > diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> > > > index b9eda3fcbd6d..595100a4c2c7 100644
> > > > --- a/arch/riscv/kernel/entry.S
> > > > +++ b/arch/riscv/kernel/entry.S
> > > > @@ -361,9 +361,14 @@ restore_all:
> > > >  resume_kernel:
> > > >         REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
> > > >         bnez s0, restore_all
> > > > -       REG_L s0, TASK_TI_FLAGS(tp)
> > > > -       andi s0, s0, _TIF_NEED_RESCHED
> > > > +       REG_L s1, TASK_TI_FLAGS(tp)
> > > > +       andi s0, s1, _TIF_NEED_RESCHED
> > > > +       bnez s0, 1f
> > > > +       REG_L s0, TASK_TI_PREEMPT_LAZY_COUNT(tp)
> > > > +       bnez s0, restore_all
> > > > +       andi s0, s1, _TIF_NEED_RESCHED_LAZY
> > > Can you tell me who increases/decreases PREEMPT_LAZY_COUNT, and
> > > who sets NEED_RESCHED_LAZY?
> >
> > There is "generic" code in the PREEMPT_RT patch doing that. The counter
> > is incremented/ decremented via preempt_lazy_enable()/disable() and one
> > of the user is migrate_disable()/enable().
> > Basically if a task is task_is_realtime() then NEED_RESCHED is set for
> > the wakeup. For the remaining states (SCHED_OTHER, …) NEED_RESCHED_LAZY
> > is set for the wakeup. This can be delayed if the task is in a "preempt
> > disable lazy" section (similar to a preempt_disable() section) but a
> > task_is_realtime() can still be scheduled if needed.
> Okay, it should be [PATCH RT]. RISC-V would also move to GENERIC_ENTRY

As said in the cover letter, this patch is expected to be reviewed and
maintained in the RT tree. If your GENERIC_ENTRY patches are merged, I
will send an updated patch.

> [1], so the above assembly code would be replaced by the generic one, right?
> 
> [1]: https://lore.kernel.org/linux-riscv/20220904072637.8619-3-guoren@kernel.org/T/#u
> 
> 
> > See details at
> >         https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git/plain/patches/sched__Add_support_for_lazy_preemption.patch?h=linux-6.0.y-rt-patches
> 
> 
> >
> > Sebastian
> 
> 
> 
> -- 
> Best Regards
>  Guo Ren
Guo Ren Sept. 6, 2022, 1:46 a.m. UTC | #6
On Mon, Sep 5, 2022 at 4:46 PM Sebastian Andrzej Siewior
<bigeasy@linutronix.de> wrote:
>
> On 2022-09-05 16:33:54 [+0800], Guo Ren wrote:
> > > There is "generic" code in the PREEMPT_RT patch doing that. The counter
> > > is incremented/ decremented via preempt_lazy_enable()/disable() and one
> > > of the user is migrate_disable()/enable().
> > > Basically if a task is task_is_realtime() then NEED_RESCHED is set for
> > > the wakeup. For the remaining states (SCHED_OTHER, …) NEED_RESCHED_LAZY
> > > is set for the wakeup. This can be delayed if the task is in a "preempt
> > > disable lazy" section (similar to a preempt_disable() section) but a
> > > task_is_realtime() can still be scheduled if needed.
> > Okay, it should be [PATCH RT]. RISC-V would also move to GENERIC_ENTRY
> > [1], so the above assembly code would be replaced by the generic one, right?
>
> correct.
Maybe TIF_XXX_RESCHED could also be merged into GENERIC_ENTRY, just
like what you've done for syscalls.

struct thread_info {
          unsigned long           flags;
          unsigned long           syscall_work;   /* SYSCALL_WORK_ flags */
+        unsigned long           resched_work;   /* RESCHED flags */

Or merge them into one:
struct thread_info {
          unsigned long           flags;
-         unsigned long           syscall_work;   /* SYSCALL_WORK_ flags */
+         unsigned long           ge_flags; /* GENERIC_ENTRY flags */

>
> Sebastian

Patch

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 7a8134fd7ec9..9f2f1936b1b5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -105,6 +105,7 @@  config RISCV
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
+	select HAVE_PREEMPT_LAZY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_STACKPROTECTOR
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 78933ac04995..471915b179a2 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -56,6 +56,7 @@ 
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	int                     preempt_count;  /* 0=>preemptible, <0=>BUG */
+	int			preempt_lazy_count;  /* 0=>preemptible, <0=>BUG */
 	/*
 	 * These stack pointers are overwritten on every system call or
 	 * exception.  SP is also saved to the stack it can be recovered when
@@ -90,7 +91,7 @@  struct thread_info {
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_RESTORE_SIGMASK	4	/* restore signal mask in do_signal() */
+#define TIF_NEED_RESCHED_LAZY	4	/* lazy rescheduling */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing */
@@ -98,6 +99,7 @@  struct thread_info {
 #define TIF_NOTIFY_SIGNAL	9	/* signal notifications exist */
 #define TIF_UPROBE		10	/* uprobe breakpoint or singlestep */
 #define TIF_32BIT		11	/* compat-mode 32bit process */
+#define TIF_RESTORE_SIGMASK	12	/* restore signal mask in do_signal() */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -108,10 +110,11 @@  struct thread_info {
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 
 #define _TIF_WORK_MASK \
 	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
-	 _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
+	 _TIF_NEED_RESCHED_LAZY | _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
 
 #define _TIF_SYSCALL_WORK \
 	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index df9444397908..e38e33822f72 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -35,6 +35,7 @@  void asm_offsets(void)
 	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
 	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
 	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
+	OFFSET(TASK_TI_PREEMPT_LAZY_COUNT, task_struct, thread_info.preempt_lazy_count);
 	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
 	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
 
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index b9eda3fcbd6d..595100a4c2c7 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -361,9 +361,14 @@  restore_all:
 resume_kernel:
 	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
 	bnez s0, restore_all
-	REG_L s0, TASK_TI_FLAGS(tp)
-	andi s0, s0, _TIF_NEED_RESCHED
+	REG_L s1, TASK_TI_FLAGS(tp)
+	andi s0, s1, _TIF_NEED_RESCHED
+	bnez s0, 1f
+	REG_L s0, TASK_TI_PREEMPT_LAZY_COUNT(tp)
+	bnez s0, restore_all
+	andi s0, s1, _TIF_NEED_RESCHED_LAZY
 	beqz s0, restore_all
+1:
 	call preempt_schedule_irq
 	j restore_all
 #endif
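
For readers less familiar with RISC-V assembly, the new resume_kernel
logic transcribes to roughly the following C (a reading aid only;
should_preempt is a made-up name, and "ti" stands for the current
task's thread_info):

	/* Reading aid: the resume_kernel hunk above, transcribed to C. */
	static bool should_preempt(struct thread_info *ti)
	{
		unsigned long flags;

		if (ti->preempt_count)
			return false;		/* preemption disabled */
		flags = READ_ONCE(ti->flags);
		if (flags & _TIF_NEED_RESCHED)
			return true;		/* urgent: preempt now */
		if (ti->preempt_lazy_count)
			return false;		/* lazy preemption disabled */
		return flags & _TIF_NEED_RESCHED_LAZY;	/* lazy request pending? */
	}

	/* resume_kernel then amounts to:
	 *	if (should_preempt(ti))
	 *		preempt_schedule_irq();
	 *	goto restore_all;
	 */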