@@ -203,6 +203,7 @@ struct clocksource {
};
extern struct clocksource *clock; /* current clocksource */
+extern spinlock_t clocksource_lock;
/*
* Clock source flags bits::
@@ -212,6 +213,7 @@ extern struct clocksource *clock; /* current clocksource */
#define CLOCK_SOURCE_WATCHDOG 0x10
#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
+#define CLOCK_SOURCE_USE_FOR_SCHED_CLOCK 0x40
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
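For a clocksource to be picked up by the sched_clock() path below, its driver
has to set the new flag explicitly at registration time. A minimal sketch of
such an opt-in follows, written against the clocksource API of this era; the
device, its 13.5 MHz rate, and every example_* identifier are hypothetical:

	#include <linux/clocksource.h>
	#include <asm/io.h>

	static void __iomem *example_counter_base;	/* hypothetical MMIO counter */

	static cycle_t example_read(void)
	{
		return (cycle_t)readl(example_counter_base);
	}

	static struct clocksource example_cs = {
		.name	= "example",
		.rating	= 300,
		.read	= example_read,
		.mask	= CLOCKSOURCE_MASK(32),
		.shift	= 20,
		/* continuous, and now explicitly safe for sched_clock() */
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_USE_FOR_SCHED_CLOCK,
	};

	static int __init example_cs_init(void)
	{
		example_cs.mult = clocksource_hz2mult(13500000, example_cs.shift);
		return clocksource_register(&example_cs);
	}

A clocksource that cannot back sched_clock() (say, one that stops in low-power
states) simply leaves the flag clear and keeps the jiffies fallback behaviour.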
@@ -30,6 +30,7 @@
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
+#include <linux/clocksource.h>
/*
* Scheduler clock - returns current time in nanosec units.
@@ -38,6 +39,27 @@
*/
unsigned long long __attribute__((weak)) sched_clock(void)
{
+	/*
+	 * Use the current clocksource once it is available later in boot.
+	 * This path must be fast, so make a single attempt at the spinlock;
+	 * if it is contended because the clocksource is changing underneath
+	 * us, fall back on jiffies and retry on the next call. The _raw_
+	 * lock ops avoid recursing into lockdep, which may call sched_clock().
+	 */
+ if (clock && _raw_spin_trylock(&clocksource_lock)) {
+ /*
+ * Only use clocksources suitable for sched_clock()
+ */
+ if (clock->flags & CLOCK_SOURCE_USE_FOR_SCHED_CLOCK) {
+			s64 now = cyc2ns(clock, clocksource_read(clock));
+ _raw_spin_unlock(&clocksource_lock);
+ return now;
+ }
+
+ _raw_spin_unlock(&clocksource_lock);
+ }
+
+ /* If all else fails, fall back on jiffies */
return (unsigned long long)(jiffies - INITIAL_JIFFIES)
* (NSEC_PER_SEC / HZ);
}
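The conversion on the fast path is just the clocksource's fixed-point
scaling; assuming the usual mult/shift helper in include/linux/clocksource.h,
cyc2ns() reduces to:

	/* minimal model of the scaling cyc2ns() performs above */
	static inline s64 example_cyc2ns(struct clocksource *cs, cycle_t cycles)
	{
		return ((u64)cycles * cs->mult) >> cs->shift;
	}

so the whole fast path costs one counter read, one multiply and one shift.
As a worked example, a 250 MHz counter with shift = 22 gets
mult = clocksource_hz2mult(250000000, 22), roughly 4 << 22, so each cycle
scales to 4 ns. Until such a clocksource is registered, the jiffies fallback
above advances in NSEC_PER_SEC/HZ steps: 10 ms granularity at HZ=100, 1 ms
at HZ=1000.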
@@ -127,7 +127,7 @@ static struct clocksource *curr_clocksource = &clocksource_jiffies;
static struct clocksource *next_clocksource;
static struct clocksource *clocksource_override;
static LIST_HEAD(clocksource_list);
-static DEFINE_SPINLOCK(clocksource_lock);
+DEFINE_SPINLOCK(clocksource_lock);
static char override_name[32];
static int finished_booting;
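Dropping the static qualifier is the smallest change that lets the weak
sched_clock() above reach the lock through the new extern declaration. The
writer side is untouched: whoever switches clocksources still holds the lock
while doing so, and that window is precisely when sched_clock()'s trylock
fails and readers take the jiffies fallback. Roughly (a sketch using the
names above, not the literal clocksource.c switch code):

	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	clock = next_clocksource;	/* publish the new current clocksource */
	spin_unlock_irqrestore(&clocksource_lock, flags);

so a reader never sees a half-switched clock: it either wins the trylock and
reads a consistent clocksource, or loses and uses jiffies for that one call.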