| Message ID | 20240427111929.9600-14-dwmw2@infradead.org |
| --- | --- |
| State | New |
| Series | [v2,01/15] KVM: x86/xen: Do not corrupt KVM clock in kvm_xen_shared_info_init() |
On 4/27/2024 7:05 PM, David Woodhouse wrote:
> From: David Woodhouse <dwmw@amazon.co.uk>
>
> These pointlessly duplicate of the last_tsc_{nsec,offset,write} values.
>
> The only place they were used was where the TSC is stable and a new vCPU
> is being synchronized to the previous setting, in which case the 'last_'
> value is definitely identical.
>
> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
> ---
>  arch/x86/include/asm/kvm_host.h | 3 ---
>  arch/x86/kvm/x86.c              | 9 ++-------
>  2 files changed, 2 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b01c1d000fff..7d06f389a607 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1354,9 +1354,6 @@ struct kvm_arch {
>  	u32 last_tsc_khz;
>  	u64 last_tsc_offset;
>  	u64 last_tsc_scaling_ratio;
> -	u64 cur_tsc_nsec;
> -	u64 cur_tsc_write;
> -	u64 cur_tsc_offset;
>  	u64 cur_tsc_generation;
>  	int nr_vcpus_matched_tsc;
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 6ec43f39bdb0..92e81bfca25a 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2737,9 +2737,6 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
>  		 * These values are tracked in kvm->arch.cur_xxx variables.
>  		 */
>  		kvm->arch.cur_tsc_generation++;
> -		kvm->arch.cur_tsc_nsec = ns;
> -		kvm->arch.cur_tsc_write = tsc;
> -		kvm->arch.cur_tsc_offset = offset;
>  		kvm->arch.nr_vcpus_matched_tsc = 0;
>  	} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
>  		kvm->arch.nr_vcpus_matched_tsc++;
> @@ -2747,8 +2744,6 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
>
>  	/* Keep track of which generation this VCPU has synchronized to */
>  	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
> -	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
> -	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

Do we need to track vcpu->arch.this_tsc_nsec/this_tsc_write? At least
they are still used in compute_guest_tsc() to calculate the guest tsc.

>
>  	kvm_track_tsc_matching(vcpu);
>  }
> @@ -2825,8 +2820,8 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
>  			data = kvm->arch.last_tsc_write;
>
>  		if (!kvm_check_tsc_unstable()) {
> -			offset = kvm->arch.cur_tsc_offset;
> -			ns = kvm->arch.cur_tsc_nsec;
> +			offset = kvm->arch.last_tsc_offset;
> +			ns = kvm->arch.last_tsc_nsec;
>  		} else {
>  			/*
>  			 * ... unless the TSC is unstable and has to be
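For context, compute_guest_tsc() is the path KVM uses when it has to derive
the guest TSC in software (the tsc_catchup case, i.e. no hardware TSC
scaling). A paraphrased sketch of the function as it stood around this
series, which may differ in detail from the exact code in
arch/x86/kvm/x86.c, shows why both per-vCPU fields are still needed:

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	/* Scale the nanoseconds elapsed since the last sync point... */
	u64 tsc = pvclock_scale_delta(kernel_ns - vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);

	/* ...and add them to the TSC value written at that sync point. */
	tsc += vcpu->arch.this_tsc_write;
	return tsc;
}

If this_tsc_nsec/this_tsc_write are no longer assigned, this computation
would silently operate on stale (zero) sync-point values.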
On Fri, 2024-05-10 at 17:03 +0800, Chenyi Qiang wrote:
>
> Do we need to track vcpu->arch.this_tsc_nsec/this_tsc_write? At least
> they are still used in compute_guest_tsc() to calculate the guest
> tsc.

Ah yes, that's true. Good catch; thanks.

That should be caught by a test case which runs the guest TSC at a
higher speed than the host, but *without* hardware TSC scaling.
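A minimal sketch of what the userspace side of such a test might look like,
assuming an already-created vCPU fd (VM/vCPU setup, guest code and most
error handling elided; request_fast_guest_tsc() is a hypothetical helper).
As I read kvm_set_tsc_khz(), a faster-than-host frequency is accepted even
without hardware TSC scaling by putting the vCPU into always-catchup mode,
which is exactly the path that relies on this_tsc_nsec/this_tsc_write via
compute_guest_tsc():

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/*
 * Hypothetical helper: ask KVM for a guest TSC running faster than the
 * host.  On hardware without TSC scaling this can only be emulated by
 * software catchup, which recomputes the guest TSC from the per-vCPU
 * this_tsc_nsec/this_tsc_write sync point.
 */
static int request_fast_guest_tsc(int vcpu_fd, unsigned long host_tsc_khz)
{
	unsigned long guest_tsc_khz = host_tsc_khz * 2;	/* deliberately faster */

	/* KVM_SET_TSC_KHZ takes the frequency in kHz as the ioctl argument. */
	if (ioctl(vcpu_fd, KVM_SET_TSC_KHZ, guest_tsc_khz) < 0) {
		perror("KVM_SET_TSC_KHZ");
		return -1;
	}
	return 0;
}

The guest would then read its TSC twice across a known wall-clock delay and
check that it advances at roughly guest_tsc_khz rather than host_tsc_khz.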
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b01c1d000fff..7d06f389a607 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1354,9 +1354,6 @@ struct kvm_arch {
 	u32 last_tsc_khz;
 	u64 last_tsc_offset;
 	u64 last_tsc_scaling_ratio;
-	u64 cur_tsc_nsec;
-	u64 cur_tsc_write;
-	u64 cur_tsc_offset;
 	u64 cur_tsc_generation;
 	int nr_vcpus_matched_tsc;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6ec43f39bdb0..92e81bfca25a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2737,9 +2737,6 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 		 * These values are tracked in kvm->arch.cur_xxx variables.
 		 */
 		kvm->arch.cur_tsc_generation++;
-		kvm->arch.cur_tsc_nsec = ns;
-		kvm->arch.cur_tsc_write = tsc;
-		kvm->arch.cur_tsc_offset = offset;
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
 		kvm->arch.nr_vcpus_matched_tsc++;
@@ -2747,8 +2744,6 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
 
 	/* Keep track of which generation this VCPU has synchronized to */
 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
-	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
-	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
 	kvm_track_tsc_matching(vcpu);
 }
@@ -2825,8 +2820,8 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 			data = kvm->arch.last_tsc_write;
 
 		if (!kvm_check_tsc_unstable()) {
-			offset = kvm->arch.cur_tsc_offset;
-			ns = kvm->arch.cur_tsc_nsec;
+			offset = kvm->arch.last_tsc_offset;
+			ns = kvm->arch.last_tsc_nsec;
 		} else {
 			/*
 			 * ... unless the TSC is unstable and has to be