@@ -742,6 +742,7 @@ static DEFINE_PER_CPU(int, cpu_tsc_shift);
static DEFINE_PER_CPU(s64, cpu_tsc_offset);
static DEFINE_PER_CPU(u64, cpu_tsc_measure_base);
static DEFINE_PER_CPU(atomic_t, cpu_tsc_synchronized);
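+/*
+ * Generation counter for the per-cpu TSC parameters above; bumped after a
+ * resync so that kvm_get_ref_tsc() can detect the update and retry.
+ */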
+static DEFINE_PER_CPU(int, cpu_tsc_generation);
static int tsc_base_cpu = -1;
static unsigned long ref_tsc_khz;
@@ -813,6 +814,29 @@ static inline u64 compute_ref_tsc(int cpu)
return tsc + per_cpu(cpu_tsc_offset, cpu);
}
+/*
+ * kvm_get_ref_tsc returns a reference TSC value which we attempt to keep
+ * stable across all CPUs, regardless of frequency shifts, CPU hotplug, or
+ * pre-existing TSC offsets.
+ */
+u64 kvm_get_ref_tsc(void)
+{
+ int cpu, gen;
+ u64 tsc;
+
+ cpu = get_cpu();
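+ /*
+  * Seqcount-style read: sample this CPU's generation, compute the
+  * reference TSC, then re-check the generation.  If a resync changed
+  * the offset in the meantime, retry with the new values.
+  */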
+again:
+ gen = per_cpu(cpu_tsc_generation, cpu);
+ smp_rmb();
+ tsc = compute_ref_tsc(cpu);
+ smp_rmb();
+ if (unlikely(gen != per_cpu(cpu_tsc_generation, cpu)))
+ goto again;
+ put_cpu();
+ return tsc;
+}
+EXPORT_SYMBOL_GPL(kvm_get_ref_tsc);
+
#define SYNC_TRIES 64
/*
@@ -929,6 +953,7 @@ static void kvm_sync_tsc(void *cpup)
accumulator -= delta[i+SYNC_TRIES];
accumulator = accumulator / (SYNC_TRIES*2-12);
per_cpu(cpu_tsc_offset, new_cpu) = accumulator;
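+ /*
+  * Bump the generation so that kvm_get_ref_tsc() callers racing with
+  * this update notice the changed offset and recompute.
+  */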
+ ++per_cpu(cpu_tsc_generation, new_cpu);
atomic_set(&per_cpu(cpu_tsc_synchronized, new_cpu), 1);
pr_debug("%s: OUT, cpu = %d, cpu_tsc_offset = %lld, cpu_tsc_multiplier=%ld, cpu_tsc_shift=%d\n", __func__, raw_smp_processor_id(), per_cpu(cpu_tsc_offset, new_cpu), per_cpu(cpu_tsc_multiplier, new_cpu), per_cpu(cpu_tsc_shift, new_cpu));
}