@@ -180,6 +180,63 @@ static void rtc_irq_restore(struct kvm_ioapic *ioapic)
}
ioapic->rtc_status.need_eoi = need_eoi;
}
+
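+/*
+ * Called just before the RTC interrupt (irq 8) is delivered: record how
+ * many EOIs to expect back.  Lowest-priority delivery lands on exactly
+ * one vcpu, so a single EOI suffices; fixed delivery reaches every vcpu
+ * in vcpu_map, so expect one EOI per mapped vcpu.
+ */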
+static void rtc_irq_set_eoi(struct kvm_ioapic *ioapic, int irq)
+{
+	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+
+	if (irq != 8)
+		return;
+
+	if (likely(!bitmap_empty(ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS))) {
+		if (entry->fields.delivery_mode == IOAPIC_LOWEST_PRIORITY)
+			ioapic->rtc_status.need_eoi = 1;
+		else
+			ioapic->rtc_status.need_eoi =
+				bitmap_weight(ioapic->rtc_status.vcpu_map,
+					      KVM_MAX_VCPUS);
+		bitmap_copy(ioapic->rtc_status.expected_eoi_bitmap,
+			    ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS);
+	}
+}
+
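+/*
+ * A target vcpu EOIed the RTC vector: clear it from the expected set and
+ * decrement the count of outstanding EOIs.  Once need_eoi reaches zero,
+ * the previous RTC interrupt has been fully acknowledged.
+ */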
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+			    struct rtc_status *rtc_status, int irq)
+{
+	if (irq != 8)
+		return;
+
+	if (test_and_clear_bit(vcpu->vcpu_id, rtc_status->expected_eoi_bitmap))
+		--rtc_status->need_eoi;
+
+	WARN_ON(rtc_status->need_eoi < 0);
+}
+
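+/*
+ * True if the previous RTC interrupt is still waiting for EOIs; injecting
+ * another one now would coalesce the two.
+ */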
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+	if (irq != 8)
+		return false;
+
+	if (ioapic->rtc_status.need_eoi > 0)
+		return true; /* coalesced */
+
+	return false;
+}
#else
void kvm_vcpu_scan_ioapic(struct kvm *kvm)
{
@@ -195,6 +252,23 @@ static void rtc_irq_restore(struct kvm_ioapic *ioapic)
{
return;
}
+
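+/* RTC EOI tracking is compiled out here; provide no-op stubs. */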
+static void rtc_irq_set_eoi(struct kvm_ioapic *ioapic, int irq)
+{
+	return;
+}
+
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+			    struct rtc_status *rtc_status, int irq)
+{
+	return;
+}
+
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+	return false;
+}
#endif

static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
@@ -262,6 +336,9 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
irqe.level = 1;
irqe.shorthand = 0;
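+	/* Record which vcpus must EOI this RTC interrupt before it goes out. */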
+	rtc_irq_set_eoi(ioapic, irq);
+
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
@@ -286,6 +363,12 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
ret = 1;
} else {
int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
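+		/* Don't inject a new RTC irq while the last one awaits EOIs. */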
+		if (rtc_irq_check(ioapic, irq)) {
+			ret = 0; /* coalesced */
+			goto out;
+		}
ioapic->irr |= mask;
if ((edge && old_irr != ioapic->irr) ||
(!edge && !entry.fields.remote_irr))
@@ -293,6 +376,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
else
ret = 0; /* report coalesced interrupt */
}
+out:
trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
spin_unlock(&ioapic->lock);
@@ -320,6 +404,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
if (ent->fields.vector != vector)
continue;
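+		/* Account the RTC EOI while the ioapic lock is still held. */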
+		rtc_irq_ack_eoi(vcpu, &ioapic->rtc_status, i);
/*
* We are dropping lock while calling ack notifiers because ack
* notifier callbacks for assigned devices call into IOAPIC