@@ -1041,6 +1041,7 @@ struct kvm_vcpu_arch {
#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_tdp;
#endif
+ DECLARE_BITMAP(ioapic_pending_vectors, 256);
};
struct kvm_lpage_info {
@@ -284,6 +284,8 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
spin_lock(&ioapic->lock);
+ bitmap_zero(vcpu->arch.ioapic_pending_vectors, 256);
+
/* Make sure we see any missing RTC EOI */
if (test_bit(vcpu->vcpu_id, dest_map->map))
__set_bit(dest_map->vectors[vcpu->vcpu_id],
@@ -297,10 +299,15 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
- e->fields.dest_id, dm) ||
- kvm_apic_pending_eoi(vcpu, e->fields.vector))
+ e->fields.dest_id, dm))
+ __set_bit(e->fields.vector,
+ ioapic_handled_vectors);
+ else if (kvm_apic_pending_eoi(vcpu, e->fields.vector)) {
__set_bit(e->fields.vector,
ioapic_handled_vectors);
+ __set_bit(e->fields.vector,
+ vcpu->arch.ioapic_pending_vectors);
+ }
}
}
spin_unlock(&ioapic->lock);
@@ -5710,6 +5710,16 @@ static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
kvm_apic_set_eoi_accelerated(vcpu, vector);
+
+	/*
+	 * Drop vectors that were marked handled solely due to a
+	 * pending EOI, now that the EOI has actually completed.
+	 */
+ if (test_bit(vector, vcpu->arch.ioapic_pending_vectors)) {
+ clear_bit(vector, vcpu->arch.ioapic_pending_vectors);
+ clear_bit(vector, vcpu->arch.ioapic_handled_vectors);
+ kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
+ }
return 1;
}