@@ -445,6 +445,7 @@ struct kvm_arch {
unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
spinlock_t clock_lock;
+ struct pvclock_vcpu_time_info master_clock;
u64 last_tsc_nsec;
u64 last_tsc_offset;
u64 last_tsc_write;
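The new master_clock field reuses the time-info record that KVM already shares with guests through the pvclock ABI (arch/x86/include/asm/pvclock-abi.h). For reference, a sketch of that layout, reproduced from memory, so check the header for the authoritative definition:

/* Shared guest/host time record; see arch/x86/include/asm/pvclock-abi.h. */
struct pvclock_vcpu_time_info {
	u32 version;           /* bumped around updates; an odd value means an update is in flight */
	u32 pad0;
	u64 tsc_timestamp;     /* TSC value sampled when the record was last refreshed */
	u64 system_time;       /* nanoseconds of guest time at tsc_timestamp */
	u32 tsc_to_system_mul; /* fixed-point multiplier from TSC cycles to nanoseconds */
	s8  tsc_shift;         /* shift applied to the TSC delta before the multiply */
	u8  flags;
	u8  pad[2];
} __attribute__((packed));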
@@ -1116,6 +1116,38 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
return 0;
/*
+ * If there is a stable TSC, we use a master reference clock for
+ * the KVM clock; otherwise, individual computations for each VCPU
+ * would exhibit slight drift relative to each other, which could
+ * cause global time to go backwards.
+ *
+ * If the master clock has no TSC timestamp, we must recompute the
+ * clock, either because some real time has elapsed during a suspend
+ * cycle, or because we are measuring the clock for the first time
+ * during VM creation (or following a migration). Since master clock
+ * changes should happen only on rare occasions, we can ignore
+ * the precautions below.
+ */
+ if (!check_tsc_unstable()) {
+ struct pvclock_vcpu_time_info *master =
+ &v->kvm->arch.master_clock;
+ if (vcpu->hv_clock.version != master->version) {
+ spin_lock(&v->kvm->arch.clock_lock);
+ WARN_ON(master->version < vcpu->hv_clock.version);
+ if (!master->tsc_timestamp) {
+ pr_debug("KVM: computing new master clock\n");
+ update_pvclock(v, master, tsc_timestamp,
+ kernel_ns, tsc_khz);
+ }
+ memcpy(&vcpu->hv_clock, master, sizeof(*master));
+ spin_unlock(&v->kvm->arch.clock_lock);
+ update_user_kvmclock(v, &vcpu->hv_clock);
+ } else
+ pr_debug("ignoring spurious KVM clock update");
+ return 0;
+ }
+
+ /*
* Time as measured by the TSC may go backwards when resetting the base
* tsc_timestamp. The reason for this is that the TSC resolution is
* higher than the resolution of the other clock scales. Thus, many
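The version handling above follows the usual pvclock convention: writers make the version odd while fields are being changed and even again once they are consistent, so bumping master_clock.version by 2 publishes a new, consistent snapshot. A simplified sketch of the guest-side read loop this protects (illustrative only; rdtsc() stands in for the platform TSC read, and the real guest pvclock code uses a 128-bit multiply to avoid overflow):

static u64 pvclock_read_ns(volatile struct pvclock_vcpu_time_info *src)
{
	u32 version;
	u64 delta, ns;

	do {
		version = src->version;
		rmb();				/* read the fields only after the version */
		delta = rdtsc() - src->tsc_timestamp;
		if (src->tsc_shift >= 0)
			delta <<= src->tsc_shift;
		else
			delta >>= -src->tsc_shift;
		ns = src->system_time + ((delta * src->tsc_to_system_mul) >> 32);
		rmb();				/* re-check the version after the fields */
	} while ((version & 1) || version != src->version);

	return ns;
}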
@@ -3482,7 +3514,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
now_ns = get_kernel_ns();
delta = user_ns.clock - now_ns;
+ spin_lock(&kvm->arch.clock_lock);
+ kvm->arch.master_clock.version += 2;
+ kvm->arch.master_clock.tsc_timestamp = 0;
kvm->arch.kvmclock_offset = delta;
+ spin_unlock(&kvm->arch.clock_lock);
break;
}
case KVM_GET_CLOCK: {
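Zeroing master_clock.tsc_timestamp here is what forces a fresh master clock computation after userspace restores the clock, for example on the destination of a migration. A hedged sketch of how a VMM might drive the KVM_GET_CLOCK/KVM_SET_CLOCK ioctls around save/restore:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: save the kvmclock from one VM fd and restore it into another. */
static int save_restore_kvmclock(int src_vm_fd, int dst_vm_fd)
{
	struct kvm_clock_data data = { 0 };

	if (ioctl(src_vm_fd, KVM_GET_CLOCK, &data) < 0)	/* guest clock in nanoseconds */
		return -1;

	/* ... transfer data.clock alongside the rest of the VM state ... */

	if (ioctl(dst_vm_fd, KVM_SET_CLOCK, &data) < 0)	/* hits the invalidation above */
		return -1;

	return 0;
}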
@@ -5845,11 +5881,14 @@ int kvm_arch_hardware_enable(void *garbage)
*/
if (backwards_tsc) {
u64 delta_cyc = max_tsc - local_tsc;
- list_for_each_entry(kvm, &vm_list, vm_list)
+ list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.tsc_offset_adjustment += delta_cyc;
vcpu->arch.last_host_tsc = 0;
}
+ kvm->arch.master_clock.tsc_timestamp = 0;
+ kvm->arch.master_clock.version += 2;
+ }
}
return 0;
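The adjustment above keeps guest time monotonic on hosts whose TSC restarts at a lower value across suspend; invalidating the master clock at the same time makes every VCPU pick up a snapshot computed against the adjusted offsets. A small illustrative sketch of the arithmetic (names are not actual KVM helpers):

/* What the guest observes is roughly the host TSC plus a per-VCPU offset. */
static inline u64 guest_visible_tsc(u64 host_tsc, u64 tsc_offset)
{
	return host_tsc + tsc_offset;
}

/*
 * If the host TSC was at max_tsc before suspend and restarts at local_tsc,
 * then with delta_cyc = max_tsc - local_tsc added to the offset:
 *
 *   guest_visible_tsc(local_tsc, offset + delta_cyc)
 *     == guest_visible_tsc(max_tsc, offset)
 *
 * so the guest-visible TSC does not move backwards.
 */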
@@ -5965,6 +6004,7 @@ struct kvm *kvm_arch_create_vm(void)
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
spin_lock_init(&kvm->arch.clock_lock);
+ kvm->arch.master_clock.version = 1000;
return kvm;
}