@@ -84,11 +84,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
- struct kvm_vcpu *vcpu = ctxt->vcpu;
-
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
- return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+ return ctxt->host_offset;
default:
return 0;
}
@@ -128,17 +126,33 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
- struct kvm_vcpu *vcpu = ctxt->vcpu;
-
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
- __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
+ ctxt->host_offset = offset;
break;
default:
WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
}
}
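+/*
+ * Set the guest-visible offset (CNTVOFF_EL2) while preserving any
+ * host-only adjustment folded into host_offset. E.g. if host_offset is
+ * H + G for a host-side delta H and guest offset G, writing a new
+ * guest offset G' leaves host_offset = H + G'.
+ */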
+static void timer_set_guest_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+ switch (arch_timer_ctx_index(ctxt)) {
+ case TIMER_VTIMER: {
+ u64 host_offset = timer_get_offset(ctxt);
+
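+		/* Apply the delta between the new and the old guest offset */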
+ host_offset += offset - __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+ __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
+ timer_set_offset(ctxt, host_offset);
+ break;
+ }
+ default:
+ WARN_ONCE(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+ }
+}
+
u64 kvm_phys_timer_read(void)
{
return timecounter->cc->read(timecounter->cc);
@@ -749,7 +763,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
/* Make offset updates for all timer contexts atomic */
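+/* @guest_visible: fold the update into the guest's CNTVOFF_EL2 too */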
static void update_timer_offset(struct kvm_vcpu *vcpu,
- enum kvm_arch_timers timer, u64 offset)
+ enum kvm_arch_timers timer, u64 offset,
+ bool guest_visible)
{
int i;
struct kvm *kvm = vcpu->kvm;
@@ -758,13 +773,20 @@ static void update_timer_offset(struct kvm_vcpu *vcpu,
lockdep_assert_held(&kvm->lock);
kvm_for_each_vcpu(i, tmp, kvm)
- timer_set_offset(vcpu_get_timer(tmp, timer), offset);
+ if (guest_visible)
+ timer_set_guest_offset(vcpu_get_timer(tmp, timer),
+ offset);
+ else
+ timer_set_offset(vcpu_get_timer(tmp, timer), offset);
/*
* When called from the vcpu create path, the CPU being created is not
* included in the loop above, so we just set it here as well.
*/
- timer_set_offset(vcpu_get_timer(vcpu, timer), offset);
+ if (guest_visible)
+ timer_set_guest_offset(vcpu_get_timer(vcpu, timer), offset);
+ else
+ timer_set_offset(vcpu_get_timer(vcpu, timer), offset);
}
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
@@ -772,7 +794,7 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
struct kvm *kvm = vcpu->kvm;
mutex_lock(&kvm->lock);
- update_timer_offset(vcpu, TIMER_VTIMER, cntvoff);
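+	/* Writes to CNTVOFF_EL2 are always guest-visible */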
+ update_timer_offset(vcpu, TIMER_VTIMER, cntvoff, true);
mutex_unlock(&kvm->lock);
}
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -42,6 +42,9 @@ struct arch_timer_context {
/* Duplicated state from arch_timer.c for convenience */
u32 host_timer_irq;
u32 host_timer_irq_flags;
+
+	/*
+	 * Offset relative to the host's physical counter-timer. For the
+	 * vtimer this is the guest's CNTVOFF_EL2 plus any host-only
+	 * adjustment.
+	 */
+ u64 host_offset;
};
struct timer_map {