| Message ID | 20190530141531.43462-4-vincenzo.frascino@arm.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | Unify vDSOs across more architectures |
On Thu, May 30, 2019 at 03:15:15PM +0100, Vincenzo Frascino wrote:
> With the definition of the unified vDSO library the implementations of
> update_vsyscall and update_vsyscall_tz became quite similar across
> architectures.
>
> Define a unified implementation of this two functions in kernel/vdso and

... of these two functions ...

> provide the bindings that can be implemented by every architecture that
> takes advantage of the unified vDSO library.
On 10/06/2019 10:34, Huw Davies wrote:
> On Thu, May 30, 2019 at 03:15:15PM +0100, Vincenzo Frascino wrote:
>> With the definition of the unified vDSO library the implementations of
>> update_vsyscall and update_vsyscall_tz became quite similar across
>> architectures.
>>
>> Define a unified implementation of this two functions in kernel/vdso and
>
> ... of these two functions ...

Thanks for this, I will fix in v7.

>
>> provide the bindings that can be implemented by every architecture that
>> takes advantage of the unified vDSO library.
On Thu, 30 May 2019, Vincenzo Frascino wrote:
> +
> +	if (__arch_use_vsyscall(vdata)) {
> +		vdata[CS_HRES_COARSE].cycle_last =
> +						tk->tkr_mono.cycle_last;
> +		vdata[CS_HRES_COARSE].mask =
> +						tk->tkr_mono.mask;
> +		vdata[CS_HRES_COARSE].mult =
> +						tk->tkr_mono.mult;

These line breaks make it really hard to read. Can you fold in the patch
below please?

Thanks,

	tglx

8<-----------

```diff
--- a/kernel/vdso/vsyscall.c
+++ b/kernel/vdso/vsyscall.c
@@ -11,6 +11,66 @@
 #include <vdso/helpers.h>
 #include <vdso/vsyscall.h>
 
+static inline void update_vdata(struct vdso_data *vdata, struct timekeeper *tk)
+{
+	struct vdso_timestamp *vdso_ts;
+	u64 nsec;
+
+	vdata[CS_HRES_COARSE].cycle_last	= tk->tkr_mono.cycle_last;
+	vdata[CS_HRES_COARSE].mask		= tk->tkr_mono.mask;
+	vdata[CS_HRES_COARSE].mult		= tk->tkr_mono.mult;
+	vdata[CS_HRES_COARSE].shift		= tk->tkr_mono.shift;
+	vdata[CS_RAW].cycle_last		= tk->tkr_raw.cycle_last;
+	vdata[CS_RAW].mask			= tk->tkr_raw.mask;
+	vdata[CS_RAW].mult			= tk->tkr_raw.mult;
+	vdata[CS_RAW].shift			= tk->tkr_raw.shift;
+
+	/* CLOCK_REALTIME */
+	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
+	vdso_ts->sec	= tk->xtime_sec;
+	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
+
+	/* CLOCK_MONOTONIC */
+	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
+	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+
+	nsec = tk->tkr_mono.xtime_nsec;
+	nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
+		vdso_ts->sec++;
+	}
+	vdso_ts->nsec	= nsec;
+
+	/* CLOCK_MONOTONIC_RAW */
+	vdso_ts		= &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
+	vdso_ts->sec	= tk->raw_sec;
+	vdso_ts->nsec	= tk->tkr_raw.xtime_nsec;
+
+	/* CLOCK_BOOTTIME */
+	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
+	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	nsec = tk->tkr_mono.xtime_nsec;
+	nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
+		       ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
+	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
+		vdso_ts->sec++;
+	}
+	vdso_ts->nsec	= nsec;
+
+	/* CLOCK_TAI */
+	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
+	vdso_ts->sec	= tk->xtime_sec + (s64)tk->tai_offset;
+	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
+
+	/*
+	 * Read without the seqlock held by clock_getres().
+	 * Note: No need to have a second copy.
+	 */
+	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+}
+
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct vdso_data *vdata = __arch_get_k_vdso_data();
@@ -32,92 +92,23 @@ void update_vsyscall(struct timekeeper *
 	vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk);
 
 	/* CLOCK_REALTIME_COARSE */
-	vdso_ts =
-		&vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
-	vdso_ts->sec = tk->xtime_sec;
-	vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
+	vdso_ts->sec	= tk->xtime_sec;
+	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+
 	/* CLOCK_MONOTONIC_COARSE */
-	vdso_ts =
-		&vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
-	vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
-	nsec = nsec + tk->wall_to_monotonic.tv_nsec;
+	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
+	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	nsec		= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+	nsec		= nsec + tk->wall_to_monotonic.tv_nsec;
 	while (nsec >= NSEC_PER_SEC) {
 		nsec = nsec - NSEC_PER_SEC;
 		vdso_ts->sec++;
 	}
-	vdso_ts->nsec = nsec;
+	vdso_ts->nsec	= nsec;
 
-	if (__arch_use_vsyscall(vdata)) {
-		vdata[CS_HRES_COARSE].cycle_last =
-						tk->tkr_mono.cycle_last;
-		vdata[CS_HRES_COARSE].mask =
-						tk->tkr_mono.mask;
-		vdata[CS_HRES_COARSE].mult =
-						tk->tkr_mono.mult;
-		vdata[CS_HRES_COARSE].shift =
-						tk->tkr_mono.shift;
-		vdata[CS_RAW].cycle_last =
-						tk->tkr_raw.cycle_last;
-		vdata[CS_RAW].mask =
-						tk->tkr_raw.mask;
-		vdata[CS_RAW].mult =
-						tk->tkr_raw.mult;
-		vdata[CS_RAW].shift =
-						tk->tkr_raw.shift;
-		/* CLOCK_REALTIME */
-		vdso_ts =
-			&vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
-		vdso_ts->sec = tk->xtime_sec;
-		vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
-		/* CLOCK_MONOTONIC */
-		vdso_ts =
-			&vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
-		vdso_ts->sec = tk->xtime_sec +
-			       tk->wall_to_monotonic.tv_sec;
-		nsec = tk->tkr_mono.xtime_nsec;
-		nsec = nsec +
-		       ((u64)tk->wall_to_monotonic.tv_nsec <<
-			tk->tkr_mono.shift);
-		while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
-			nsec = nsec -
-			       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
-			vdso_ts->sec++;
-		}
-		vdso_ts->nsec = nsec;
-		/* CLOCK_MONOTONIC_RAW */
-		vdso_ts =
-			&vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
-		vdso_ts->sec = tk->raw_sec;
-		vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
-		/* CLOCK_BOOTTIME */
-		vdso_ts =
-			&vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
-		vdso_ts->sec = tk->xtime_sec +
-			       tk->wall_to_monotonic.tv_sec;
-		nsec = tk->tkr_mono.xtime_nsec;
-		nsec = nsec +
-		       ((u64)(tk->wall_to_monotonic.tv_nsec +
-			      ktime_to_ns(tk->offs_boot)) <<
-			tk->tkr_mono.shift);
-		while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
-			nsec = nsec -
-			       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
-			vdso_ts->sec++;
-		}
-		vdso_ts->nsec = nsec;
-		/* CLOCK_TAI */
-		vdso_ts =
-			&vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
-		vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
-		vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
-
-		/*
-		 * Read without the seqlock held by clock_getres().
-		 * Note: No need to have a second copy.
-		 */
-		WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
-	}
+	if (__arch_use_vsyscall(vdata))
+		update_vdata(vdata, tk);
 
 	__arch_update_vsyscall(vdata, tk);
```
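[Editorial aside on the CLOCK_MONOTONIC arithmetic in the patch above: the timekeeper keeps tkr_mono.xtime_nsec pre-shifted left by tkr_mono.shift, so one full second is (NSEC_PER_SEC << shift) and the while loop simply carries whole seconds out of the nanosecond accumulator. Below is a minimal standalone sketch of that normalization with made-up values; it is not kernel code and not part of the thread.]

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t shift = 8;                      /* example clocksource shift */
	uint64_t sec = 1000;                     /* seconds accumulator */
	uint64_t nsec = 1750000000ULL << shift;  /* 1.75e9 ns, stored pre-shifted */

	/* Carry whole (shifted) seconds from nsec into sec, as the patch does. */
	while (nsec >= (NSEC_PER_SEC << shift)) {
		nsec -= NSEC_PER_SEC << shift;
		sec++;
	}

	/* Prints sec=1001 nsec=750000000 */
	printf("sec=%llu nsec=%llu\n", (unsigned long long)sec,
	       (unsigned long long)(nsec >> shift));
	return 0;
}
```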
Hi Thomas,

On 6/14/19 12:10 PM, Thomas Gleixner wrote:
> On Thu, 30 May 2019, Vincenzo Frascino wrote:
>> +
>> +	if (__arch_use_vsyscall(vdata)) {
>> +		vdata[CS_HRES_COARSE].cycle_last =
>> +						tk->tkr_mono.cycle_last;
>> +		vdata[CS_HRES_COARSE].mask =
>> +						tk->tkr_mono.mask;
>> +		vdata[CS_HRES_COARSE].mult =
>> +						tk->tkr_mono.mult;
>
> These line breaks make it really hard to read. Can you fold in the patch
> below please?
>

Thanks for this. I will do it in v7.

> Thanks,
>
> 	tglx
>
> 8<-----------
>
> [... cleanup patch quoted in full above ...]
On Fri, 14 Jun 2019, Vincenzo Frascino wrote:
> On 6/14/19 12:10 PM, Thomas Gleixner wrote:
> > On Thu, 30 May 2019, Vincenzo Frascino wrote:
> >> +
> >> +	if (__arch_use_vsyscall(vdata)) {
> >> +		vdata[CS_HRES_COARSE].cycle_last =
> >> +						tk->tkr_mono.cycle_last;
> >> +		vdata[CS_HRES_COARSE].mask =
> >> +						tk->tkr_mono.mask;
> >> +		vdata[CS_HRES_COARSE].mult =
> >> +						tk->tkr_mono.mult;
> >
> > These line breaks make it really hard to read. Can you fold in the patch
> > below please?
>
> Thanks for this. I will do it in v7.

Talking about v7. I'd like to get this into 5.3. That means you'd have to
rebase it on

  git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git hyperv-next

to avoid the hyperv conflict. I'll sort this out with the hyperv folks how
I can get these bits as a base for a tip branch which holds all the vdso
pieces.

Thanks,

	tglx
On 6/14/19 1:19 PM, Thomas Gleixner wrote:
> On Fri, 14 Jun 2019, Vincenzo Frascino wrote:
>> On 6/14/19 12:10 PM, Thomas Gleixner wrote:
>>> On Thu, 30 May 2019, Vincenzo Frascino wrote:
>>>> +
>>>> +	if (__arch_use_vsyscall(vdata)) {
>>>> +		vdata[CS_HRES_COARSE].cycle_last =
>>>> +						tk->tkr_mono.cycle_last;
>>>> +		vdata[CS_HRES_COARSE].mask =
>>>> +						tk->tkr_mono.mask;
>>>> +		vdata[CS_HRES_COARSE].mult =
>>>> +						tk->tkr_mono.mult;
>>>
>>> These line breaks make it really hard to read. Can you fold in the patch
>>> below please?
>>>
>>
>> Thanks for this. I will do it in v7.
>
> Talking about v7. I'd like to get this into 5.3. That means you'd have to
> rebase it on
>
> git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git hyperv-next
>
> to avoid the hyperv conflict. I'll sort this out with the hyperv folks how
> I can get these bits as a base for a tip branch which holds all the vdso
> pieces.
>

Ok, I will rebase and test the patches against the hyperv-next branch. Could you
please let me know when all the bits are sorted?

> Thanks,
>
> tglx
>
On Fri, 14 Jun 2019, Vincenzo Frascino wrote:
> On 6/14/19 1:19 PM, Thomas Gleixner wrote:
> > On Fri, 14 Jun 2019, Vincenzo Frascino wrote:
> >> On 6/14/19 12:10 PM, Thomas Gleixner wrote:
> >>> On Thu, 30 May 2019, Vincenzo Frascino wrote:
> >>>> +
> >>>> +	if (__arch_use_vsyscall(vdata)) {
> >>>> +		vdata[CS_HRES_COARSE].cycle_last =
> >>>> +						tk->tkr_mono.cycle_last;
> >>>> +		vdata[CS_HRES_COARSE].mask =
> >>>> +						tk->tkr_mono.mask;
> >>>> +		vdata[CS_HRES_COARSE].mult =
> >>>> +						tk->tkr_mono.mult;
> >>>
> >>> These line breaks make it really hard to read. Can you fold in the patch
> >>> below please?
> >>>
> >>
> >> Thanks for this. I will do it in v7.
> >
> > Talking about v7. I'd like to get this into 5.3. That means you'd have to
> > rebase it on
> >
> > git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git hyperv-next
> >
> > to avoid the hyperv conflict. I'll sort this out with the hyperv folks how
> > I can get these bits as a base for a tip branch which holds all the vdso
> > pieces.
> >
>
> Ok, I will rebase and test the patches against the hyperv-next branch. Could you
> please let me know when all the bits are sorted?

Don't worry. Just post it against that branch and I'll sort out the logistics
independently.

Thanks,

	tglx
```diff
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
new file mode 100644
index 000000000000..9a4b9fbcc9b6
--- /dev/null
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_VSYSCALL_H
+#define __ASM_GENERIC_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#ifndef __arch_get_k_vdso_data
+static __always_inline
+struct vdso_data *__arch_get_k_vdso_data(void)
+{
+	return NULL;
+}
+#endif /* __arch_get_k_vdso_data */
+
+#ifndef __arch_update_vdso_data
+static __always_inline
+int __arch_update_vdso_data(void)
+{
+	return 0;
+}
+#endif /* __arch_update_vdso_data */
+
+#ifndef __arch_get_clock_mode
+static __always_inline
+int __arch_get_clock_mode(struct timekeeper *tk)
+{
+	return 0;
+}
+#endif /* __arch_get_clock_mode */
+
+#ifndef __arch_use_vsyscall
+static __always_inline
+int __arch_use_vsyscall(struct vdso_data *vdata)
+{
+	return 1;
+}
+#endif /* __arch_use_vsyscall */
+
+#ifndef __arch_update_vsyscall
+static __always_inline
+void __arch_update_vsyscall(struct vdso_data *vdata,
+			    struct timekeeper *tk)
+{
+}
+#endif /* __arch_update_vsyscall */
+
+#ifndef __arch_sync_vdso_data
+static __always_inline
+void __arch_sync_vdso_data(struct vdso_data *vdata)
+{
+}
+#endif /* __arch_sync_vdso_data */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_VSYSCALL_H */
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 7acb953298a7..8177e75a71eb 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -135,9 +135,18 @@ struct timekeeper {
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 
+#ifdef CONFIG_HAVE_GENERIC_VDSO
+
+void update_vsyscall(struct timekeeper *tk);
+void update_vsyscall_tz(void);
+
+#else
+
 extern void update_vsyscall(struct timekeeper *tk);
 extern void update_vsyscall_tz(void);
 
+#endif /* CONFIG_HAVE_GENERIC_VDSO */
+
 #else
 
 static inline void update_vsyscall(struct timekeeper *tk)
diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h
new file mode 100644
index 000000000000..2c6134e0c23d
--- /dev/null
+++ b/include/vdso/vsyscall.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_VSYSCALL_H
+#define __VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __VDSO_VSYSCALL_H */
diff --git a/kernel/Makefile b/kernel/Makefile
index 33824f0385b3..56a98ebb7772 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_HAVE_GENERIC_VDSO) += vdso/
 obj-y += time/
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
diff --git a/kernel/vdso/Makefile b/kernel/vdso/Makefile
new file mode 100644
index 000000000000..ad0d3b1a475c
--- /dev/null
+++ b/kernel/vdso/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_HAVE_GENERIC_VDSO) += vsyscall.o
diff --git a/kernel/vdso/vsyscall.c b/kernel/vdso/vsyscall.c
new file mode 100644
index 000000000000..49409eece728
--- /dev/null
+++ b/kernel/vdso/vsyscall.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 ARM Ltd.
+ *
+ * Generic implementation of update_vsyscall and update_vsyscall_tz.
+ */
+
+#include <linux/hrtimer.h>
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+
+void update_vsyscall(struct timekeeper *tk)
+{
+	struct vdso_data *vdata = __arch_get_k_vdso_data();
+	struct vdso_timestamp *vdso_ts;
+	u64 nsec;
+
+	if (__arch_update_vdso_data()) {
+		/*
+		 * Some architectures might want to skip the update of the
+		 * data page.
+		 */
+		return;
+	}
+
+	/* copy vsyscall data */
+	vdso_write_begin(vdata);
+
+	vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk);
+	vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk);
+
+	/* CLOCK_REALTIME_COARSE */
+	vdso_ts =
+		&vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
+	vdso_ts->sec = tk->xtime_sec;
+	vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+	/* CLOCK_MONOTONIC_COARSE */
+	vdso_ts =
+		&vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
+	vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+	nsec = nsec + tk->wall_to_monotonic.tv_nsec;
+	while (nsec >= NSEC_PER_SEC) {
+		nsec = nsec - NSEC_PER_SEC;
+		vdso_ts->sec++;
+	}
+	vdso_ts->nsec = nsec;
+
+	if (__arch_use_vsyscall(vdata)) {
+		vdata[CS_HRES_COARSE].cycle_last =
+						tk->tkr_mono.cycle_last;
+		vdata[CS_HRES_COARSE].mask =
+						tk->tkr_mono.mask;
+		vdata[CS_HRES_COARSE].mult =
+						tk->tkr_mono.mult;
+		vdata[CS_HRES_COARSE].shift =
+						tk->tkr_mono.shift;
+		vdata[CS_RAW].cycle_last =
+						tk->tkr_raw.cycle_last;
+		vdata[CS_RAW].mask =
+						tk->tkr_raw.mask;
+		vdata[CS_RAW].mult =
+						tk->tkr_raw.mult;
+		vdata[CS_RAW].shift =
+						tk->tkr_raw.shift;
+		/* CLOCK_REALTIME */
+		vdso_ts =
+			&vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
+		vdso_ts->sec = tk->xtime_sec;
+		vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
+		/* CLOCK_MONOTONIC */
+		vdso_ts =
+			&vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
+		vdso_ts->sec = tk->xtime_sec +
+			       tk->wall_to_monotonic.tv_sec;
+		nsec = tk->tkr_mono.xtime_nsec;
+		nsec = nsec +
+		       ((u64)tk->wall_to_monotonic.tv_nsec <<
+			tk->tkr_mono.shift);
+		while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+			nsec = nsec -
+			       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
+			vdso_ts->sec++;
+		}
+		vdso_ts->nsec = nsec;
+		/* CLOCK_MONOTONIC_RAW */
+		vdso_ts =
+			&vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
+		vdso_ts->sec = tk->raw_sec;
+		vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
+		/* CLOCK_BOOTTIME */
+		vdso_ts =
+			&vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
+		vdso_ts->sec = tk->xtime_sec +
+			       tk->wall_to_monotonic.tv_sec;
+		nsec = tk->tkr_mono.xtime_nsec;
+		nsec = nsec +
+		       ((u64)(tk->wall_to_monotonic.tv_nsec +
+			      ktime_to_ns(tk->offs_boot)) <<
+			tk->tkr_mono.shift);
+		while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+			nsec = nsec -
+			       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
+			vdso_ts->sec++;
+		}
+		vdso_ts->nsec = nsec;
+		/* CLOCK_TAI */
+		vdso_ts =
+			&vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
+		vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
+		vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
+
+		/*
+		 * Read without the seqlock held by clock_getres().
+		 * Note: No need to have a second copy.
+		 */
+		WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+	}
+
+	__arch_update_vsyscall(vdata, tk);
+
+	vdso_write_end(vdata);
+
+	__arch_sync_vdso_data(vdata);
+}
+
+void update_vsyscall_tz(void)
+{
+	struct vdso_data *vdata = __arch_get_k_vdso_data();
+
+	if (__arch_use_vsyscall(vdata)) {
+		vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
+		vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
+	}
+
+	__arch_sync_vdso_data(vdata);
+}
```
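[Editorial aside on how the #ifndef-guarded fallbacks in include/asm-generic/vdso/vsyscall.h are meant to be consumed: an architecture provides its own binding in its asm/vdso/vsyscall.h, defines the matching macro so the generic stub is compiled out, and includes the asm-generic header for everything it leaves alone. Below is a hypothetical sketch of such an override; the vdso_data_store symbol is invented for illustration and is not part of this patch.]

```c
/* arch/<arch>/include/asm/vdso/vsyscall.h -- hypothetical sketch */
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H

#ifndef __ASSEMBLY__

#include <vdso/datapage.h>

/* Assumed arch-side storage for the vDSO data page. */
extern struct vdso_data *vdso_data_store;

static __always_inline
struct vdso_data *__arch_get_k_vdso_data(void)
{
	return vdso_data_store;
}
/* Defining the macro makes the #ifndef fallback in asm-generic drop out. */
#define __arch_get_k_vdso_data __arch_get_k_vdso_data

/* Every binding left undefined here falls back to the asm-generic stub. */
#include <asm-generic/vdso/vsyscall.h>

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_VSYSCALL_H */
```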
With the definition of the unified vDSO library the implementations of
update_vsyscall and update_vsyscall_tz became quite similar across
architectures.

Define a unified implementation of this two functions in kernel/vdso and
provide the bindings that can be implemented by every architecture that
takes advantage of the unified vDSO library.

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
---
 include/asm-generic/vdso/vsyscall.h |  56 +++++++++++
 include/linux/timekeeper_internal.h |   9 ++
 include/vdso/vsyscall.h             |  11 +++
 kernel/Makefile                     |   1 +
 kernel/vdso/Makefile                |   2 +
 kernel/vdso/vsyscall.c              | 139 ++++++++++++++++++++++++++++
 6 files changed, 218 insertions(+)
 create mode 100644 include/asm-generic/vdso/vsyscall.h
 create mode 100644 include/vdso/vsyscall.h
 create mode 100644 kernel/vdso/Makefile
 create mode 100644 kernel/vdso/vsyscall.c
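[Editorial aside on the vdso_write_begin()/vdso_write_end() pair in update_vsyscall(): the writer bumps a sequence counter around the update, and vDSO readers retry if they observe the count change. Below is a sketch of the matching reader side, assuming the vdso_read_begin()/vdso_read_retry() helpers from <vdso/helpers.h> in this series; the function do_coarse_read and its exact shape are illustrative, not the series' actual reader.]

```c
#include <vdso/datapage.h>
#include <vdso/helpers.h>

/* 'vd' would point at vdata[CS_HRES_COARSE] in the vDSO data page. */
static __maybe_unused int
do_coarse_read(const struct vdso_data *vd, struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts =
			&vd->basetime[CLOCK_REALTIME_COARSE];
	u32 seq;

	do {
		/* Waits out an in-flight update, then samples the count. */
		seq = vdso_read_begin(vd);
		ts->tv_sec  = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (vdso_read_retry(vd, seq));	/* count moved: re-read */

	return 0;
}
```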