@@ -156,7 +156,6 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
__kvm_migrate_apic_timer(vcpu);
__kvm_migrate_pit_timer(vcpu);
- __kvm_migrate_xen_timer(vcpu);
static_call_cond(kvm_x86_migrate_timers)(vcpu);
}
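With the timer switched to HRTIMER_MODE_ABS_HARD below and event delivery deferred to vCPU context, nothing about the Xen timer is tied to the physical CPU the vCPU last ran on, so the dedicated migration hook can go. A pinned timer, by contrast, has to be re-armed on the destination CPU by hand; a minimal sketch of that pattern, mirroring the deleted __kvm_migrate_xen_timer() (hypothetical helper name, shown only to illustrate what is no longer needed):

    static void migrate_pinned_hrtimer_example(struct hrtimer *timer)
    {
            /* hrtimer_cancel() returns nonzero if the timer was queued */
            if (hrtimer_cancel(timer))
                    /* re-arm at the same absolute expiry, now pinned
                     * to the CPU this code is running on */
                    hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
    }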
@@ -122,6 +122,8 @@ void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
e.port = vcpu->arch.xen.timer_virq;
e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
kvm_xen_set_evtchn(&e, vcpu->kvm);
+
+ vcpu->arch.xen.timer_expires = 0;
atomic_set(&vcpu->arch.xen.timer_pending, 0);
}
}
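Zeroing timer_expires at the point the pending event is actually injected keeps it consistent with timer_pending: timer_expires is what the KVM_XEN_VCPU_ATTR_TYPE_TIMER attribute reports back, so vCPU state saved after delivery should show no timer outstanding. A rough userspace-side sketch of what that buys (vcpu_fd is assumed, error handling elided; the struct and ioctl are the existing uapi from <linux/kvm.h>):

    struct kvm_xen_vcpu_attr va = {
            .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
    };

    if (!ioctl(vcpu_fd, KVM_XEN_VCPU_GET_ATTR, &va))
            /* once the pending IRQ has been injected, expires_ns
             * reads back as 0, so the timer is not replayed on the
             * migration target */
            assert(va.u.timer.expires_ns == 0);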
@@ -130,19 +132,9 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
arch.xen.timer);
- struct kvm_xen_evtchn e;
-
if (atomic_read(&vcpu->arch.xen.timer_pending))
return HRTIMER_NORESTART;
- e.vcpu_id = vcpu->vcpu_id;
- e.vcpu_idx = vcpu->vcpu_idx;
- e.port = vcpu->arch.xen.timer_virq;
- e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
-
- if (kvm_xen_set_evtchn_fast(&e, vcpu->kvm) != -EWOULDBLOCK)
- return HRTIMER_NORESTART;
-
atomic_inc(&vcpu->arch.xen.timer_pending);
kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
kvm_vcpu_kick(vcpu);
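The callback is now unconditionally the deferred path: running in hard interrupt context, it only marks the timer pending, raises KVM_REQ_UNBLOCK and kicks the vCPU. The actual kvm_xen_set_evtchn() happens from task context in kvm_xen_inject_timer_irqs() above, which is free to take sleeping locks and touch guest memory. Schematically, the consuming half on the vCPU run path looks like this (a sketch of the existing wiring, not a new call site):

    /* task context, before re-entering the guest */
    if (kvm_xen_has_pending_timer(vcpu))
            kvm_xen_inject_timer_irqs(vcpu);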
@@ -150,29 +142,19 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void __kvm_migrate_xen_timer(struct kvm_vcpu *vcpu)
-{
- struct hrtimer *timer;
-
- if (!kvm_xen_timer_enabled(vcpu))
- return;
-
- timer = &vcpu->arch.xen.timer;
- if (hrtimer_cancel(timer))
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
-}
-
-static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, u64 delta_ns)
+static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
- ktime_t ktime_now;
-
atomic_set(&vcpu->arch.xen.timer_pending, 0);
vcpu->arch.xen.timer_expires = guest_abs;
- ktime_now = ktime_get();
- hrtimer_start(&vcpu->arch.xen.timer,
- ktime_add_ns(ktime_now, delta_ns),
- HRTIMER_MODE_ABS_PINNED);
+ if (delta_ns <= 0) {
+ xen_timer_callback(&vcpu->arch.xen.timer);
+ } else {
+ ktime_t ktime_now = ktime_get();
+ hrtimer_start(&vcpu->arch.xen.timer,
+ ktime_add_ns(ktime_now, delta_ns),
+ HRTIMER_MODE_ABS_HARD);
+ }
}
static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
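Making delta_ns signed lets a caller hand in a timeout that is already in the past, and handling that by calling xen_timer_callback() synchronously is safe precisely because the callback no longer delivers anything itself; the end state is identical to an hrtimer that fired immediately. The signedness is not cosmetic: with the old u64 parameter, a just-expired timeout became an expiry centuries in the future rather than "fire now" (values below are made up for illustration):

    u64 guest_now = 1000000;            /* kvmclock 'now', in ns  */
    u64 timeout   =  999999;            /* 1 ns in the past       */

    u64 wrapped = timeout - guest_now;  /* 0xffffffffffffffff, or
                                         * roughly 584 years ahead */
    s64 delta   = timeout - guest_now;  /* -1: expire right away  */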
@@ -185,7 +167,7 @@ static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_PINNED);
+ HRTIMER_MODE_ABS_HARD);
vcpu->arch.xen.timer.function = xen_timer_callback;
}
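The mode passed to hrtimer_init() has to agree with the mode used when the timer is armed, which is why both call sites flip from ABS_PINNED to ABS_HARD together. In HARD mode the handler runs in hard interrupt context even on PREEMPT_RT; that is fine for the slimmed-down xen_timer_callback(), which only touches atomics and the request machinery. The matching pattern, reduced to its essentials (hypothetical names):

    static enum hrtimer_restart demo_expired(struct hrtimer *t)
    {
            /* hard-irq context, even on PREEMPT_RT: no sleeping locks */
            return HRTIMER_NORESTART;
    }

    static void demo_arm(struct hrtimer *t, s64 delta_ns)
    {
            hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
            t->function = demo_expired;
            hrtimer_start(t, ktime_add_ns(ktime_get(), delta_ns),
                          HRTIMER_MODE_ABS_HARD);
    }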
@@ -1204,7 +1186,7 @@ static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
if (timeout) {
uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
- long delta = timeout - guest_now;
+ int64_t delta = timeout - guest_now;
/* Xen has a 'Linux workaround' in do_set_timer_op() which
* checks for negative absolute timeout values (caused by
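int64_t also makes the width explicit: on an LP64 host, long is already 64 bits, but the difference of two u64 kvmclock values must not be narrowed anywhere long is 32 bits. A small illustration of the truncation the explicit type rules out (ILP32 shown purely for contrast):

    uint64_t delta64 = 0x100000001ULL;  /* about 4.3 s, in ns        */
    long     bad     = delta64;         /* 1 ns where long is 32-bit */
    int64_t  good    = delta64;         /* about 4.3 s everywhere    */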
@@ -75,7 +75,6 @@ static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
return 0;
}
-void __kvm_migrate_xen_timer(struct kvm_vcpu *vcpu);
void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
#else
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)