@@ -103,7 +103,8 @@ static void rtc_irq_restore(struct kvm_ioapic *ioapic)
kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
if (kvm_apic_pending_eoi(vcpu, vector)) {
need_eoi++;
- set_bit(vcpu->vcpu_id, ioapic->rtc_status.vcpu_map);
+ set_bit(vcpu->vcpu_id,
+ ioapic->rtc_status.expected_eoi_bitmap);
}
}
ioapic->rtc_status.need_eoi = need_eoi;
@@ -132,6 +133,50 @@ static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
spin_unlock(&ioapic->lock);
}
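+/*
+ * Record how many EOIs the guest owes for this RTC interrupt: one for
+ * lowest-priority delivery, otherwise one per vCPU in vcpu_map.
+ * expected_eoi_bitmap tracks which vCPUs still have to EOI.
+ */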
+static void rtc_irq_set_eoi(struct kvm_ioapic *ioapic, int irq)
+{
+ union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+
+ if (irq != 8)
+ return;
+
+ if (likely(!bitmap_empty(ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS))) {
+ if (entry->fields.delivery_mode == IOAPIC_LOWEST_PRIORITY)
+ ioapic->rtc_status.need_eoi = 1;
+ else {
+ int weight;
+ weight = bitmap_weight(ioapic->rtc_status.vcpu_map,
+ KVM_MAX_VCPUS);
+ ioapic->rtc_status.need_eoi = weight;
+ }
+ bitmap_copy(ioapic->rtc_status.expected_eoi_bitmap,
+ ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS);
+ }
+}
+
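+/*
+ * A vCPU has EOIed the RTC vector: drop it from the expected bitmap
+ * and decrement the outstanding EOI count.
+ */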
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+ struct rtc_status *rtc_status, int irq)
+{
+ if (irq != 8)
+ return;
+
+ if (test_and_clear_bit(vcpu->vcpu_id, rtc_status->expected_eoi_bitmap))
+ --rtc_status->need_eoi;
+
+ WARN_ON(rtc_status->need_eoi < 0);
+}
+
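+/*
+ * The RTC interrupt is reported as coalesced while EOIs from the
+ * previous injection are still outstanding.
+ */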
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+ if (irq != 8)
+ return false;
+
+ if (ioapic->rtc_status.need_eoi > 0)
+ return true; /* coalesced */
+
+ return false;
+}
+
#else
static void rtc_irq_reset(struct kvm_ioapic *ioapic)
@@ -148,6 +193,22 @@ static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
{
return;
}
+
+static void rtc_irq_set_eoi(struct kvm_ioapic *ioapic, int irq)
+{
+ return;
+}
+
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+ struct rtc_status *rtc_status, int irq)
+{
+ return;
+}
+
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+ return false;
+}
#endif
static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
@@ -260,7 +321,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
&& ioapic->irr & (1 << index))
ioapic_service(ioapic, index);
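+ /* Drop the ioapic lock across the existing kvm_scan_ioapic_entry() call. */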
+ spin_unlock(&ioapic->lock);
kvm_scan_ioapic_entry(ioapic->kvm);
+ spin_lock(&ioapic->lock);
break;
}
}
@@ -284,6 +347,8 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
irqe.level = 1;
irqe.shorthand = 0;
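+ /* Record the EOIs expected for this RTC injection before delivery. */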
+ rtc_irq_set_eoi(ioapic, irq);
+
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
@@ -308,6 +373,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
ret = 1;
} else {
int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
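+ /* Don't inject a new RTC interrupt while expected EOIs are outstanding. */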
+ if (rtc_irq_check(ioapic, irq)) {
+ ret = 0; /* coalesced */
+ goto out;
+ }
ioapic->irr |= mask;
if ((edge && old_irr != ioapic->irr) ||
(!edge && !entry.fields.remote_irr))
@@ -315,6 +385,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
else
ret = 0; /* report coalesced interrupt */
}
+out:
trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
spin_unlock(&ioapic->lock);
@@ -342,6 +413,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
if (ent->fields.vector != vector)
continue;
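+ /* Account for this vCPU's EOI of the RTC vector. */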
+ rtc_irq_ack_eoi(vcpu, &ioapic->rtc_status, i);
/*
* We are dropping lock while calling ack notifiers because ack
* notifier callbacks for assigned devices call into IOAPIC
@@ -562,7 +634,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
update_handled_vectors(ioapic);
rtc_irq_restore(ioapic);
- kvm_scan_ioapic_entry(kvm);
spin_unlock(&ioapic->lock);
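+ /* Rescan the ioapic entries after dropping the lock. */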
+ kvm_scan_ioapic_entry(kvm);
return 0;
}