@@ -36,6 +36,9 @@ On x86:
holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.tsc_write_lock is taken outside
+ kvm->arch.pvclock_gtod_sync_lock
+
Everything else is a leaf: no other lock is taken inside the critical
sections.
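
To make the ordering rule added above concrete: a path that needs both
locks takes the TSC write lock first and nests the pvclock lock inside
it. A minimal sketch (illustrative only, not lifted from x86.c; the
inner lock is taken without irqsave here because the outer
raw_spin_lock_irqsave() has already disabled interrupts):

	unsigned long flags;

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	/* ... update TSC offset bookkeeping ... */
	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	/* ... update masterclock / matched-TSC bookkeeping ... */
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);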
@@ -222,6 +225,14 @@ time it will be set using the Dirty tracking mechanism described above.
:Comment: 'raw' because hardware enabling/disabling must be atomic /wrt
migration.
+:Name: kvm_arch::pvclock_gtod_sync_lock
+:Type: raw_spinlock_t
+:Arch: x86
+:Protects: kvm_arch::{cur_tsc_generation,cur_tsc_nsec,cur_tsc_write,
+ cur_tsc_offset,nr_vcpus_matched_tsc}
+:Comment: 'raw' because updating the kvm master clock must not be
+ preempted.
+
:Name: kvm_arch::tsc_write_lock
:Type: raw_spinlock
:Arch: x86
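
For orientation, the fields named in the new entry above all live in
struct kvm_arch (arch/x86/include/asm/kvm_host.h). A rough sketch of
the relevant members, grouped by the lock documented as protecting
them (member order abbreviated and types quoted from memory, so treat
kvm_host.h as authoritative):

	struct kvm_arch {
		/* ... */
		raw_spinlock_t tsc_write_lock;

		/* Documented above as protected by pvclock_gtod_sync_lock: */
		u64 cur_tsc_nsec;
		u64 cur_tsc_write;
		u64 cur_tsc_offset;
		u64 cur_tsc_generation;
		int nr_vcpus_matched_tsc;
		/* ... */
	};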
@@ -2529,7 +2529,6 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
kvm_vcpu_write_tsc_offset(vcpu, offset);
- raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
if (!matched) {
@@ -2540,6 +2539,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
kvm_track_tsc_matching(vcpu);
spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+ raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
}
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
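
Taken together, the two hunks leave the tail of kvm_synchronize_tsc()
with the pvclock critical section fully nested inside the
tsc_write_lock critical section, i.e. (paraphrasing the patched code,
with the elided logic summarized in comments):

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	/* ... compute offset, update cur_tsc_* and this_tsc_* state ... */
	kvm_vcpu_write_tsc_offset(vcpu, offset);

	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
	/* ... nr_vcpus_matched_tsc accounting for the !matched case ... */
	kvm_track_tsc_matching(vcpu);
	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);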
A later change requires that the pvclock sync lock be taken while
holding the tsc_write_lock. Change the locking in kvm_synchronize_tsc()
ahead of time, so that the locking change is isolated to its own
commit.

Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Oliver Upton <oupton@google.com>
---
 Documentation/virt/kvm/locking.rst | 11 +++++++++++
 arch/x86/kvm/x86.c                 |  2 +-
 2 files changed, 12 insertions(+), 1 deletion(-)