@@ -109,6 +109,29 @@ static void rtc_irq_restore(struct kvm_ioapic *ioapic)
 	}
 	ioapic->rtc_status.pending_eoi = pending_eoi;
 }
+
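+/*
+ * EOI bookkeeping for the RTC interrupt (GSI 8): if this vCPU is still
+ * marked in dest_map as a destination of the last RTC interrupt, clear
+ * it and retire one outstanding EOI.
+ */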
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+			struct rtc_status *rtc_status, int irq)
+{
+	if (irq != 8)
+		return;
+
+	if (test_and_clear_bit(vcpu->vcpu_id, rtc_status->dest_map))
+		--rtc_status->pending_eoi;
+
+	WARN_ON(rtc_status->pending_eoi < 0);
+}
+
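+/*
+ * Returns true when the last RTC interrupt (GSI 8) has not yet been
+ * EOIed by all of its destinations, i.e. a new one would be coalesced.
+ */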
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+	if (irq != 8)
+		return false;
+
+	if (ioapic->rtc_status.pending_eoi > 0)
+		return true; /* coalesced */
+
+	return false;
+}
 #else
 static void rtc_irq_reset(struct kvm_ioapic *ioapic)
 {
@@ -119,6 +142,17 @@ static void rtc_irq_restore(struct kvm_ioapic *ioapic)
 {
 	return;
 }
+
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+			struct rtc_status *rtc_status, int irq)
+{
+	return;
+}
+
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+	return false;
+}
 #endif
 
 static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
@@ -236,6 +270,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
 	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
 	struct kvm_lapic_irq irqe;
+	int ret;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
@@ -251,7 +286,16 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	irqe.level = 1;
 	irqe.shorthand = 0;
 
-	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
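+	/*
+	 * For the RTC (GSI 8), dest_map records which vCPUs the interrupt
+	 * was delivered to and pending_eoi counts the EOIs still expected
+	 * from them.
+	 */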
+#ifdef CONFIG_X86
+	if (irq == 8) {
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
+				ioapic->rtc_status.dest_map);
+		ioapic->rtc_status.pending_eoi = ret;
+	} else
+#endif
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+
+	return ret;
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
@@ -275,6 +319,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		ret = 1;
 	} else {
 		int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
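+		/* The RTC interrupt is coalesced while EOIs are pending. */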
+		if (rtc_irq_check(ioapic, irq)) {
+			ret = 0; /* coalesced */
+			goto out;
+		}
 		ioapic->irr |= mask;
 		if ((edge && old_irr != ioapic->irr) ||
 		    (!edge && !entry.fields.remote_irr))
@@ -282,6 +331,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		else
 			ret = 0; /* report coalesced interrupt */
 	}
+out:
 	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	spin_unlock(&ioapic->lock);
 
@@ -309,6 +359,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		if (ent->fields.vector != vector)
 			continue;
 
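+		/* Retire a pending RTC EOI for this vCPU, if any. */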
+		rtc_irq_ack_eoi(vcpu, &ioapic->rtc_status, i);
 		/*
 		 * We are dropping lock while calling ack notifiers because ack
 		 * notifier callbacks for assigned devices call into IOAPIC