@@ -747,22 +747,32 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
return 0;
}
-/* Make the updates of cntvoff for all vtimer contexts atomic */
-static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
+/* Make offset updates for all timer contexts atomic; caller holds kvm->lock */
+static void update_timer_offset(struct kvm_vcpu *vcpu,
+ enum kvm_arch_timers timer, u64 offset)
{
int i;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
- mutex_lock(&kvm->lock);
+ lockdep_assert_held(&kvm->lock); /* callers serialize the cross-vCPU update */
+
kvm_for_each_vcpu(i, tmp, kvm)
- timer_set_offset(vcpu_vtimer(tmp), cntvoff);
+ timer_set_offset(vcpu_get_timer(tmp, timer), offset);
/*
* When called from the vcpu create path, the CPU being created is not
* included in the loop above, so we just set it here as well.
*/
- timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
+ timer_set_offset(vcpu_get_timer(vcpu, timer), offset);
+}
+
+static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->lock); /* provides the atomicity update_timer_offset() asserts */
+ update_timer_offset(vcpu, TIMER_VTIMER, cntvoff);
mutex_unlock(&kvm->lock);
}