@@ -887,7 +887,7 @@ struct kvm_vcpu_arch {
gpa_t time;
struct pvclock_vcpu_time_info hv_clock;
- unsigned int hw_tsc_khz;
+ uint64_t hw_tsc_hz;
struct gfn_to_pfn_cache pv_time;
/* set guest stopped flag in pvclock flags field */
bool pvclock_set_guest_stopped_request;
@@ -3215,7 +3215,8 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
- unsigned long flags, tgt_tsc_khz;
+ unsigned long flags;
+ uint64_t tgt_tsc_hz;
unsigned seq;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
@@ -3252,8 +3253,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- tgt_tsc_khz = get_cpu_tsc_khz();
- if (unlikely(tgt_tsc_khz == 0)) {
+ tgt_tsc_hz = get_cpu_tsc_khz() * 1000LL;
+ if (unlikely(tgt_tsc_hz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
@@ -3288,14 +3289,14 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* With all the info we got, fill in the values */
if (kvm_caps.has_tsc_control)
- tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
- v->arch.l1_tsc_scaling_ratio);
+ tgt_tsc_hz = kvm_scale_tsc(tgt_tsc_hz,
+ v->arch.l1_tsc_scaling_ratio);
- if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
- kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
+ if (unlikely(vcpu->hw_tsc_hz != tgt_tsc_hz)) {
+ kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_hz,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
- vcpu->hw_tsc_khz = tgt_tsc_khz;
+ vcpu->hw_tsc_hz = tgt_tsc_hz;
kvm_xen_update_tsc_info(v);
}
@@ -2273,7 +2273,7 @@ void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
entry = kvm_find_cpuid_entry_index(vcpu, function, 2);
if (entry)
- entry->eax = vcpu->arch.hw_tsc_khz;
+ entry->eax = vcpu->arch.hw_tsc_hz / 1000;
}
void kvm_xen_init_vm(struct kvm *kvm)
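
For reference, here is a minimal user-space sketch (not part of the patch) of why the scaling is now applied to a Hz value: with the old code the L1 scaling ratio was applied to a kHz count, so the effective guest TSC frequency was truncated to whole kilohertz before kvm_get_time_scale() derived tsc_shift/tsc_to_system_mul, while the new code keeps the sub-kHz remainder. The 48 fractional bits (Intel-style ratio format), the example frequencies, and the local re-implementation of mul_u64_u64_shr() are assumptions made purely for illustration; in KVM the real work is done by kvm_scale_tsc() and kvm_get_time_scale().

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel's mul_u64_u64_shr(). */
    static uint64_t mul_u64_u64_shr(uint64_t a, uint64_t mul, unsigned int shift)
    {
            return (uint64_t)(((unsigned __int128)a * mul) >> shift);
    }

    int main(void)
    {
            /* Hypothetical host TSC and a guest running at one third of its rate. */
            const unsigned int frac_bits = 48;               /* Intel-style ratio format */
            uint64_t host_tsc_khz = 2999999;                 /* 2.999999 GHz host TSC */
            uint64_t ratio = ((uint64_t)1 << frac_bits) / 3;

            /* Old path: scale the kHz value, then convert to Hz. */
            uint64_t khz_first = mul_u64_u64_shr(host_tsc_khz, ratio, frac_bits) * 1000;
            /* New path: convert to Hz first, then scale. */
            uint64_t hz_first = mul_u64_u64_shr(host_tsc_khz * 1000, ratio, frac_bits);

            printf("scaled via kHz: %llu Hz\n", (unsigned long long)khz_first);
            printf("scaled via Hz:  %llu Hz\n", (unsigned long long)hz_first);
            return 0;
    }

The kHz-first result is always a whole multiple of 1000 Hz, while the Hz-first result retains the sub-kilohertz fraction of the scaled frequency; that truncation is what previously fed into the guest's pvclock parameters.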