Message ID | 20230508213147.786238095@infradead.org (mailing list archive)
---|---
State | RFC, archived
Series | local_clock() vs noinstr
On Mon, May 08, 2023 at 11:19:57PM +0200, Peter Zijlstra wrote:
> With the intent to provide local_clock_noinstr(), a variant of
> local_clock() that's safe to be called from noinstr code (with the
> assumption that any such code will already be non-preemptible),
> prepare for things by providing a noinstr sched_clock_noinstr()
> function.
>
> Specifically, preempt_enable_*() calls out to schedule(), which upsets
> noinstr validation efforts.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  arch/s390/include/asm/timex.h | 13 +++++++++----
>  arch/s390/kernel/time.c       | 11 ++++++++++-
>  2 files changed, 19 insertions(+), 5 deletions(-)
...
> +static __always_inline unsigned long __get_tod_clock_monotonic(void)
> +{
> +        return get_tod_clock() - tod_clock_base.tod;
> +}
> +
>  /**
>   * get_clock_monotonic - returns current time in clock rate units
>   *
> @@ -216,7 +221,7 @@ static inline unsigned long get_tod_cloc
>          unsigned long tod;
>
>          preempt_disable_notrace();
> -        tod = get_tod_clock() - tod_clock_base.tod;
> +        tod = __get_tod_clock_monotonic();
>          preempt_enable_notrace();
>          return tod;
>  }
...
> +unsigned long long noinstr sched_clock_noinstr(void)
> +{
> +        return tod_to_ns(__get_tod_clock_monotonic());
> +}
> +
>  /*
>   * Scheduler clock - returns current time in nanosec units.
>   */
>  unsigned long long notrace sched_clock(void)
>  {
> -        return tod_to_ns(get_tod_clock_monotonic());
> +        unsigned long long ns;
> +        preempt_disable_notrace();
> +        ns = tod_to_ns(get_tod_clock_monotonic());
> +        preempt_enable_notrace();
> +        return ns;
>  }
>  NOKPROBE_SYMBOL(sched_clock);

This disables preemption twice within sched_clock(). So this should either
call __get_tod_clock_monotonic() instead, or the function could stay as it
is, which I would prefer.
On Tue, May 09, 2023 at 08:13:59AM +0200, Heiko Carstens wrote:
> On Mon, May 08, 2023 at 11:19:57PM +0200, Peter Zijlstra wrote:
> > With the intent to provide local_clock_noinstr(), a variant of
> > local_clock() that's safe to be called from noinstr code (with the
> > assumption that any such code will already be non-preemptible),
> > prepare for things by providing a noinstr sched_clock_noinstr()
> > function.
> >
> > Specifically, preempt_enable_*() calls out to schedule(), which upsets
> > noinstr validation efforts.
> >
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > ---
> >  arch/s390/include/asm/timex.h | 13 +++++++++----
> >  arch/s390/kernel/time.c       | 11 ++++++++++-
> >  2 files changed, 19 insertions(+), 5 deletions(-)
> ...
> > +static __always_inline unsigned long __get_tod_clock_monotonic(void)
> > +{
> > +        return get_tod_clock() - tod_clock_base.tod;
> > +}
> > +
> >  /**
> >   * get_clock_monotonic - returns current time in clock rate units
> >   *
> > @@ -216,7 +221,7 @@ static inline unsigned long get_tod_cloc
> >          unsigned long tod;
> >
> >          preempt_disable_notrace();
> > -        tod = get_tod_clock() - tod_clock_base.tod;
> > +        tod = __get_tod_clock_monotonic();
> >          preempt_enable_notrace();
> >          return tod;
> >  }
> ...
> > +unsigned long long noinstr sched_clock_noinstr(void)
> > +{
> > +        return tod_to_ns(__get_tod_clock_monotonic());
> > +}
> > +
> >  /*
> >   * Scheduler clock - returns current time in nanosec units.
> >   */
> >  unsigned long long notrace sched_clock(void)
> >  {
> > -        return tod_to_ns(get_tod_clock_monotonic());
> > +        unsigned long long ns;
> > +        preempt_disable_notrace();
> > +        ns = tod_to_ns(get_tod_clock_monotonic());
> > +        preempt_enable_notrace();
> > +        return ns;
> >  }
> >  NOKPROBE_SYMBOL(sched_clock);
>
> This disables preemption twice within sched_clock(). So this should either
> call __get_tod_clock_monotonic() instead, or the function could stay as it
> is, which I would prefer.

Duh. Will fix.
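For reference, a minimal sketch of the first option Heiko mentions (not the posted patch): sched_clock() calls the new preemption-free helper directly, so the TOD read sits under exactly one preempt_disable_notrace()/preempt_enable_notrace() pair. The alternative he says he prefers is to leave sched_clock() unchanged, since get_tod_clock_monotonic() already disables preemption itself.

/*
 * Sketch only: avoids the double preempt-disable noted above by
 * calling __get_tod_clock_monotonic() instead of
 * get_tod_clock_monotonic(), keeping a single notrace guard.
 */
unsigned long long notrace sched_clock(void)
{
        unsigned long long ns;

        preempt_disable_notrace();
        ns = tod_to_ns(__get_tod_clock_monotonic());
        preempt_enable_notrace();
        return ns;
}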
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -63,7 +63,7 @@ static inline int store_tod_clock_ext_cc
         return cc;
 }
 
-static inline void store_tod_clock_ext(union tod_clock *tod)
+static __always_inline void store_tod_clock_ext(union tod_clock *tod)
 {
         asm volatile("stcke %0" : "=Q" (*tod) : : "cc");
 }
@@ -177,7 +177,7 @@ static inline void local_tick_enable(uns
 
 typedef unsigned long cycles_t;
 
-static inline unsigned long get_tod_clock(void)
+static __always_inline unsigned long get_tod_clock(void)
 {
         union tod_clock clk;
 
@@ -204,6 +204,11 @@ void init_cpu_timer(void);
 
 extern union tod_clock tod_clock_base;
 
+static __always_inline unsigned long __get_tod_clock_monotonic(void)
+{
+        return get_tod_clock() - tod_clock_base.tod;
+}
+
 /**
  * get_clock_monotonic - returns current time in clock rate units
  *
@@ -216,7 +221,7 @@ static inline unsigned long get_tod_cloc
         unsigned long tod;
 
         preempt_disable_notrace();
-        tod = get_tod_clock() - tod_clock_base.tod;
+        tod = __get_tod_clock_monotonic();
         preempt_enable_notrace();
         return tod;
 }
@@ -240,7 +245,7 @@ static inline unsigned long get_tod_cloc
  * -> ns = (th * 125) + ((tl * 125) >> 9);
  *
  */
-static inline unsigned long tod_to_ns(unsigned long todval)
+static __always_inline unsigned long tod_to_ns(unsigned long todval)
 {
         return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
 }
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -102,12 +102,21 @@ void __init time_early_init(void)
                 ((long) qui.old_leap * 4096000000L);
 }
 
+unsigned long long noinstr sched_clock_noinstr(void)
+{
+        return tod_to_ns(__get_tod_clock_monotonic());
+}
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
 unsigned long long notrace sched_clock(void)
 {
-        return tod_to_ns(get_tod_clock_monotonic());
+        unsigned long long ns;
+        preempt_disable_notrace();
+        ns = tod_to_ns(get_tod_clock_monotonic());
+        preempt_enable_notrace();
+        return ns;
 }
 NOKPROBE_SYMBOL(sched_clock);
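As an aside on the tod_to_ns() helper the patch marks __always_inline: it computes ns = tod * 125 / 512 (one TOD unit is 1/4096 microsecond) in a split high/low form so the multiplication cannot overflow for large TOD values. A small self-contained user-space sketch (for illustration only, not part of the patch) cross-checks the split form against the naive formula in a range where the naive form is safe:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same split as the kernel's tod_to_ns(): ns = tod * 125 / 512 */
static uint64_t tod_to_ns(uint64_t todval)
{
        return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
        /* 4096 TOD units == 1 microsecond == 1000 ns */
        assert(tod_to_ns(4096) == 1000);

        /* cross-check against the naive formula where it cannot overflow */
        for (uint64_t tod = 0; tod < 10000000; tod += 333)
                assert(tod_to_ns(tod) == tod * 125 / 512);

        printf("split tod_to_ns() matches tod * 125 / 512\n");
        return 0;
}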
With the intent to provide local_clock_noinstr(), a variant of
local_clock() that's safe to be called from noinstr code (with the
assumption that any such code will already be non-preemptible),
prepare for things by providing a noinstr sched_clock_noinstr()
function.

Specifically, preempt_enable_*() calls out to schedule(), which upsets
noinstr validation efforts.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/s390/include/asm/timex.h | 13 +++++++++----
 arch/s390/kernel/time.c       | 11 ++++++++++-
 2 files changed, 19 insertions(+), 5 deletions(-)
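To make the stated assumption concrete, a hypothetical caller sketch (the function name is invented for illustration and is not in the patch): noinstr code is expected to already run with preemption disabled, so it can read the clock via sched_clock_noinstr() without the preempt_disable_notrace()/preempt_enable_notrace() pair whose enable path can call into schedule() and trip noinstr validation.

/*
 * Hypothetical example, assuming kernel context (noinstr, u64):
 * a noinstr path takes a timestamp directly; per the assumption
 * above it already runs non-preemptible, so no preemption guard
 * (and hence no potential call into schedule()) is needed here.
 */
static noinstr u64 example_entry_timestamp(void)
{
        return sched_clock_noinstr();
}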