@@ -201,6 +201,10 @@ module_param(eager_page_split, bool, 0644);
 static bool __read_mostly mitigate_smt_rsb;
 module_param(mitigate_smt_rsb, bool, 0444);
 
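+/*
+ * kvm_get_time_and_clockread() is defined later in this file, and only for
+ * CONFIG_X86_64, hence the forward declaration for kvm_synchronize_tsc().
+ */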
+#ifdef CONFIG_X86_64
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);
+#endif
+
 /*
  * Restoring the host value for MSRs that are only consumed when running in
  * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
@@ -2753,14 +2757,22 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 {
 	u64 data = user_value ? *user_value : 0;
 	struct kvm *kvm = vcpu->kvm;
-	u64 offset, ns, elapsed;
+	u64 offset, host_tsc, ns, elapsed;
 	unsigned long flags;
 	bool matched = false;
 	bool synchronizing = false;
 
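+	/*
+	 * Sample the host clock and the corresponding host TSC as a pair.
+	 * On !CONFIG_X86_64 the guarded "if" compiles away, leaving a bare
+	 * block, so the fallback is always used; it is also used when
+	 * kvm_get_time_and_clockread() fails, i.e. when the host clocksource
+	 * is not TSC based.
+	 */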
+#ifdef CONFIG_X86_64
+	if (!kvm_get_time_and_clockread(&ns, &host_tsc))
+#endif
+	{
+		ns = get_kvmclock_base_ns();
+		host_tsc = rdtsc();
+	}
+
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
-	ns = get_kvmclock_base_ns();
+
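+	/*
+	 * Compute the offset from the host TSC that was sampled together
+	 * with 'ns' above, rather than from a fresh rdtsc(), so that the
+	 * offset and the nanosecond timestamp refer to the same instant.
+	 */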
+	offset = kvm_compute_l1_tsc_offset(vcpu, host_tsc, data);
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	if (vcpu->arch.virtual_tsc_khz) {
@@ -2805,12 +2817,24 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 	 */
 	if (synchronizing &&
 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
+		/*
+		 * If synchronizing, the "last written" TSC value/time recorded
+		 * by __kvm_synchronize_tsc() should not change (i.e. should
+		 * be precisely the same as the existing generation)...
+		 */
+		data = kvm->arch.last_tsc_write;
+
 		if (!kvm_check_tsc_unstable()) {
 			offset = kvm->arch.cur_tsc_offset;
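+			/* ... and neither should the nanosecond timestamp. */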
+			ns = kvm->arch.cur_tsc_nsec;
 		} else {
+			/*
+			 * ... unless the TSC is unstable and has to be
+			 * synthesised from the host clock in nanoseconds.
+			 */
 			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			data += delta;
-			offset = kvm_compute_l1_tsc_offset(vcpu, rdtsc(), data);
+			offset = kvm_compute_l1_tsc_offset(vcpu, host_tsc, data);
 		}
 		matched = true;
 	}