@@ -28,14 +28,14 @@ u64 dummy_sched_clock(void);
 DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
 DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
 
-void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
-				       void (*save)(void), void (*restore)(void));
+int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+				      void (*save)(void), void (*restore)(void));
 
 static __always_inline void paravirt_set_sched_clock(u64 (*func)(void),
						      void (*save)(void),
						      void (*restore)(void))
 {
-	__paravirt_set_sched_clock(func, true, save, restore);
+	(void)__paravirt_set_sched_clock(func, true, save, restore);
 }
 
 static __always_inline u64 paravirt_sched_clock(void)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -337,9 +337,12 @@ static int kvmclock_setup_percpu(unsigned int cpu)
 
 static void __init kvm_sched_clock_init(bool stable)
 {
+	if (__paravirt_set_sched_clock(kvm_sched_clock_read, stable,
+				       kvm_save_sched_clock_state,
+				       kvm_restore_sched_clock_state))
+		return;
+
 	kvm_sched_clock_offset = kvm_clock_read();
-	__paravirt_set_sched_clock(kvm_sched_clock_read, stable,
-				   kvm_save_sched_clock_state, kvm_restore_sched_clock_state);
 	kvmclock_is_sched_clock = true;
 
 	/*
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -86,8 +86,8 @@ static u64 native_steal_clock(int cpu)
 DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
 
-void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
-				       void (*save)(void), void (*restore)(void))
+int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+				      void (*save)(void), void (*restore)(void))
 {
 	if (!stable)
 		clear_sched_clock_stable();
@@ -95,6 +95,7 @@ void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
 	static_call_update(pv_sched_clock, func);
 	x86_platform.save_sched_clock_state = save;
 	x86_platform.restore_sched_clock_state = restore;
+	return 0;
 }
 
 /* These are in entry.S */
Add a return code to __paravirt_set_sched_clock() so that the kernel can
reject attempts to use a PV sched_clock without breaking the caller.
E.g. when running as a CoCo VM with a secure TSC, using a PV clock is
generally undesirable.

Note, kvmclock is the only PV clock that does anything "extra" beyond
simply registering itself as sched_clock, i.e. is the only caller that
needs to check the new return value.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/paravirt.h | 6 +++---
 arch/x86/kernel/kvmclock.c      | 7 +++++--
 arch/x86/kernel/paravirt.c      | 5 +++--
 3 files changed, 11 insertions(+), 7 deletions(-)
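For illustration only (not part of this patch), a rejection path in
__paravirt_set_sched_clock() could look like the sketch below.  The
sched_clock_is_locked_down() helper is hypothetical, a stand-in for
whatever condition (e.g. a CoCo guest with a secure TSC) ends up gating
PV sched_clock registration:

int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
				      void (*save)(void), void (*restore)(void))
{
	/*
	 * Hypothetical gate (not a real kernel API): keep the native,
	 * TSC-based sched_clock and tell the caller its PV clock was
	 * rejected.
	 */
	if (sched_clock_is_locked_down())
		return -EPERM;

	if (!stable)
		clear_sched_clock_stable();

	static_call_update(pv_sched_clock, func);
	x86_platform.save_sched_clock_state = save;
	x86_platform.restore_sched_clock_state = restore;
	return 0;
}

With such a gate in place, kvm_sched_clock_init() bails out before
stashing kvm_sched_clock_offset or setting kvmclock_is_sched_clock,
while callers that do nothing beyond registering simply discard the
return value through the paravirt_set_sched_clock() wrapper.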