@@ -2251,7 +2251,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v, int nested_pi_nv);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
@@ -135,13 +135,13 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
/*
* Read pending interrupt vector and intack.
*/
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v, int nested_pi_nv)
{
	int vector = kvm_cpu_get_extint(v);

	if (vector != -1)
		return vector;			/* PIC */

-	return kvm_get_apic_interrupt(v);	/* APIC */
+	return kvm_get_apic_interrupt(v, nested_pi_nv);	/* APIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
@@ -2924,7 +2924,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
}
}
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, int nested_pi_nv)
{
int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -2939,8 +2939,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
* on exit" mode. Then we cannot inject the interrupt via RVI,
* because the process would deliver it through the IDT.
*/
-
apic_clear_irr(vector, apic);
+
+ /*
+ * If the vector is L2's posted interrupt notification vector, return
+ * without moving the vector to the ISR, as notification interrupts
+ * trigger processing in L2, i.e. aren't delivered to L1.
+ */
+ if (vector == nested_pi_nv)
+ return vector;
+
if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
/*
* For auto-EOI interrupts, there might be another pending
@@ -89,7 +89,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
+int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, int nested_pi_nv);
int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
@@ -4294,7 +4294,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
if (nested_exit_intr_ack_set(vcpu)) {
int irq;
- irq = kvm_cpu_get_interrupt(vcpu);
+ irq = kvm_cpu_get_interrupt(vcpu, -1);
if (WARN_ON_ONCE(irq < 0))
goto no_vmexit;
@@ -10548,7 +10548,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
if (r < 0)
goto out;
if (r) {
- int irq = kvm_cpu_get_interrupt(vcpu);
+ int irq = kvm_cpu_get_interrupt(vcpu, -1);
if (!WARN_ON_ONCE(irq == -1)) {
kvm_queue_interrupt(vcpu, irq, false);
When getting an IRQ from the local APIC, don't move the vector to the ISR
and skip the PPR update if the found vector is the vCPU's nested posted
interrupt notification vector, i.e. if the IRQ should trigger posted
interrupt processing in L2 instead of being delivered to L1.

For now, pass in -1 from all callers and defer passing the actual nested
notification vector to a separate patch, as more prep work is needed.

Functionally, this should be a glorified nop, i.e. no true functional
change intended.

Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm_host.h | 2 +-
 arch/x86/kvm/irq.c              | 6 +++---
 arch/x86/kvm/lapic.c            | 12 ++++++++++--
 arch/x86/kvm/lapic.h            | 2 +-
 arch/x86/kvm/vmx/nested.c       | 2 +-
 arch/x86/kvm/x86.c              | 2 +-
 6 files changed, 17 insertions(+), 9 deletions(-)