
[v3,01/18] x86/tsc: Inline native_read_tsc and remove __native_read_tsc

Message ID: d05ffe2aaf8468ca475ebc00efad7b2fa174af19.1434501121.git.luto@kernel.org
State New, archived

Commit Message

Andy Lutomirski June 17, 2015, 12:35 a.m. UTC
In cdc7957d1954 ("x86: move native_read_tsc() offline"),
native_read_tsc was moved out of line, presumably for some
now-obsolete vDSO-related reason.  Undo it.

The entire rdtsc/shl/or instruction sequence is only 11 bytes, and
calls via rdtscl() and similar helpers were already inlined.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
 arch/x86/entry/vdso/vclock_gettime.c  | 2 +-
 arch/x86/include/asm/msr.h            | 8 +++-----
 arch/x86/include/asm/pvclock.h        | 2 +-
 arch/x86/include/asm/stackprotector.h | 2 +-
 arch/x86/include/asm/tsc.h            | 2 +-
 arch/x86/kernel/apb_timer.c           | 4 ++--
 arch/x86/kernel/tsc.c                 | 6 ------
 7 files changed, 9 insertions(+), 17 deletions(-)
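
[Editor's note: a minimal sketch of what the inlined read boils down to, using
plain GCC inline assembly rather than the kernel's DECLARE_ARGS/EAX_EDX_VAL
macros seen in the msr.h hunk below; the function name is illustrative, not
from the patch. RDTSC returns the low 32 bits of the counter in EAX and the
high 32 bits in EDX; the shift-and-or is the "shl/or" the commit message
refers to.]

	/*
	 * Sketch only: combine EDX:EAX from RDTSC into a 64-bit value,
	 * as the inlined native_read_tsc() effectively does.
	 */
	static inline unsigned long long read_tsc_sketch(void)
	{
		unsigned int lo, hi;

		asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
		return ((unsigned long long)hi << 32) | lo;
	}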

Comments

Borislav Petkov June 17, 2015, 9:26 a.m. UTC | #1
On Tue, Jun 16, 2015 at 05:35:49PM -0700, Andy Lutomirski wrote:
> In cdc7957d1954 ("x86: move native_read_tsc() offline"),
> native_read_tsc was moved out of line, presumably for some
> now-obsolete vDSO-related reason.  Undo it.
> 
> The entire rdtsc/shl/or instruction sequence is only 11 bytes, and
> calls via rdtscl() and similar helpers were already inlined.
> 
> Signed-off-by: Andy Lutomirski <luto@kernel.org>
> ---
>  arch/x86/entry/vdso/vclock_gettime.c  | 2 +-
>  arch/x86/include/asm/msr.h            | 8 +++-----
>  arch/x86/include/asm/pvclock.h        | 2 +-
>  arch/x86/include/asm/stackprotector.h | 2 +-
>  arch/x86/include/asm/tsc.h            | 2 +-
>  arch/x86/kernel/apb_timer.c           | 4 ++--
>  arch/x86/kernel/tsc.c                 | 6 ------
>  7 files changed, 9 insertions(+), 17 deletions(-)

Acked-by: Borislav Petkov <bp@suse.de>

Patch

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 9793322751e0..972b488ac16a 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -186,7 +186,7 @@  notrace static cycle_t vread_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)__native_read_tsc();
+	ret = (cycle_t)native_read_tsc();
 
 	last = gtod->cycle_last;
 
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index e6a707eb5081..88711470af7f 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -106,12 +106,10 @@  notrace static inline int native_write_msr_safe(unsigned int msr,
 	return err;
 }
 
-extern unsigned long long native_read_tsc(void);
-
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long __native_read_tsc(void)
+static __always_inline unsigned long long native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
@@ -181,10 +179,10 @@  static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 }
 
 #define rdtscl(low)						\
-	((low) = (u32)__native_read_tsc())
+	((low) = (u32)native_read_tsc())
 
 #define rdtscll(val)						\
-	((val) = __native_read_tsc())
+	((val) = native_read_tsc())
 
 #define rdpmc(counter, low, high)			\
 do {							\
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index d6b078e9fa28..71bd485c2986 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -62,7 +62,7 @@  static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-	u64 delta = __native_read_tsc() - src->tsc_timestamp;
+	u64 delta = native_read_tsc() - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index c2e00bb2a136..bc5fa2af112e 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -72,7 +72,7 @@  static __always_inline void boot_init_stack_canary(void)
 	 * on during the bootup the random pool has true entropy too.
 	 */
 	get_random_bytes(&canary, sizeof(canary));
-	tsc = __native_read_tsc();
+	tsc = native_read_tsc();
 	canary += tsc + (tsc << 32UL);
 
 	current->stack_canary = canary;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 94605c0e9cee..fd11128faf25 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -42,7 +42,7 @@  static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	return (cycles_t)native_read_tsc();
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index ede92c3364d3..9fe111cc50f8 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -390,13 +390,13 @@  unsigned long apbt_quick_calibrate(void)
 	old = dw_apb_clocksource_read(clocksource_apbt);
 	old += loop;
 
-	t1 = __native_read_tsc();
+	t1 = native_read_tsc();
 
 	do {
 		new = dw_apb_clocksource_read(clocksource_apbt);
 	} while (new < old);
 
-	t2 = __native_read_tsc();
+	t2 = native_read_tsc();
 
 	shift = 5;
 	if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 505449700e0c..e7710cd7ba00 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -308,12 +308,6 @@  unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 #endif
 
-unsigned long long native_read_tsc(void)
-{
-	return __native_read_tsc();
-}
-EXPORT_SYMBOL(native_read_tsc);
-
 int check_tsc_unstable(void)
 {
 	return tsc_unstable;