From patchwork Tue Feb 26 12:34:16 2013
From: Yang Zhang <yang.z.zhang@intel.com>
To: kvm@vger.kernel.org
Cc: gleb@redhat.com, mtosatti@redhat.com, xiantao.zhang@intel.com,
	Yang Zhang <yang.z.zhang@intel.com>
Subject: [PATCH] KVM: VMX: fix lost interrupt when enabling virtual interrupt delivery
Date: Tue, 26 Feb 2013 20:34:16 +0800
Message-Id: <1361882056-10482-1-git-send-email-yang.z.zhang@intel.com>

From: Yang Zhang <yang.z.zhang@intel.com>

On platforms that support the virtual interrupt delivery feature, hardware
clears vIRR automatically while the target vcpu is running, so software
must not modify vIRR while the target vcpu is running. This patch records
the virtual interrupt in posted_irr when delivering a virtual interrupt to
the guest, and then syncs posted_irr into vIRR from the target vcpu's own
context.

The patch that enables Posted Interrupts uses similar logic. Since that
patch is still under discussion, this part is split out of it to fix the
virtual interrupt delivery issue now.
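For reviewers, a rough standalone sketch of the accumulate-and-sync pattern
the patch uses (a simplified user-space illustration, not KVM code: a
pthread mutex stands in for the kernel spinlock, plain arrays stand in for
the virtual-APIC page, and the names merely mirror the patch):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t posted_irr[8];        /* pending vectors, 32 per word */
static uint8_t  posted_irr_changed;   /* one bit per posted_irr word  */
static uint32_t virr[8];              /* stand-in for the vIRR page   */
static pthread_mutex_t posted_irr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Delivery side: record the vector without touching vIRR directly. */
static void post_irq(int vector)
{
	pthread_mutex_lock(&posted_irr_lock);
	posted_irr[vector >> 5] |= 1u << (vector & 31);
	posted_irr_changed |= 1u << (vector >> 5);
	pthread_mutex_unlock(&posted_irr_lock);
}

/* Target vcpu context: fold accumulated bits into vIRR, then clear.
 * This is the only context in which software may write vIRR, since
 * hardware clears vIRR on its own while the vcpu is running. */
static void sync_posted_irr(void)
{
	pthread_mutex_lock(&posted_irr_lock);
	for (int i = 0; i < 8; i++) {
		if (posted_irr_changed & (1u << i)) {
			virr[i] |= posted_irr[i];
			posted_irr[i] = 0;
		}
	}
	posted_irr_changed = 0;
	pthread_mutex_unlock(&posted_irr_lock);
}

int main(void)
{
	post_irq(0x31);                 /* word 1, bit 17 */
	post_irq(0xec);                 /* word 7, bit 12 */
	sync_posted_irr();
	printf("virr[1]=%#x virr[7]=%#x\n", virr[1], virr[7]);
	return 0;
}

In the patch itself, post_irq() corresponds to the apicv branch added in
__apic_accept_irq(), and sync_posted_irr() corresponds to
kvm_apic_update_irr(), called from apic_find_highest_irr() in the target
vcpu's context.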
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
---
 arch/x86/kvm/lapic.c | 39 ++++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/lapic.h |  3 +++
 arch/x86/kvm/vmx.c   |  3 ++-
 3 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd..98bc37e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -357,6 +357,11 @@ static u8 count_vectors(void *bitmap)
 	return count;
 }
 
+static inline bool apic_test_irr(int vec, struct kvm_lapic *apic)
+{
+	return apic_test_vector(vec, apic->regs + APIC_IRR);
+}
+
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = true;
@@ -368,6 +373,26 @@ static inline int apic_search_irr(struct kvm_lapic *apic)
 	return find_highest_vector(apic->regs + APIC_IRR);
 }
 
+static inline void kvm_apic_update_irr(struct kvm_lapic *apic)
+{
+	int index;
+
+	if (!kvm_x86_ops->vm_has_apicv(apic->vcpu->kvm))
+		return;
+
+	if (apic->posted_irr_changed) {
+		spin_lock(&apic->posted_irr_lock);
+		for_each_set_bit(index,
+			(unsigned long *)(&apic->posted_irr_changed), 8) {
+			*((u32 *)(apic->regs + APIC_IRR + index * 0x10)) |=
+				apic->posted_irr[index];
+			apic->posted_irr[index] = 0;
+		}
+		apic->posted_irr_changed = 0;
+		spin_unlock(&apic->posted_irr_lock);
+	}
+}
+
 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 {
 	int result;
@@ -379,6 +404,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 	if (!apic->irr_pending)
 		return -1;
 
+	kvm_apic_update_irr(apic);
 	result = apic_search_irr(apic);
 	ASSERT(result == -1 || result >= 16);
 
@@ -700,7 +726,17 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		} else
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-		result = !apic_test_and_set_irr(vector, apic);
+		if (kvm_x86_ops->vm_has_apicv(vcpu->kvm)) {
+			spin_lock(&apic->posted_irr_lock);
+			if (!apic_test_irr(vector, apic))
+				result = !test_and_set_bit(vector,
+					(unsigned long *)apic->posted_irr);
+			if (result)
+				apic->posted_irr_changed |= 1 << (vector >> 5);
+			spin_unlock(&apic->posted_irr_lock);
+		} else {
+			result = !apic_test_and_set_irr(vector, apic);
+		}
 		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
 					  trig_mode, vector, !result);
 		if (!result) {
@@ -1567,6 +1603,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 		static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
 	kvm_lapic_reset(vcpu);
 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
+	spin_lock_init(&apic->posted_irr_lock);
 
 	return 0;
 nomem_free_apic:
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 1676d34..38bebc8 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -20,6 +20,9 @@ struct kvm_lapic {
 	u32 divide_count;
 	struct kvm_vcpu *vcpu;
 	bool irr_pending;
+	u32 posted_irr[8];
+	u8 posted_irr_changed;
+	spinlock_t posted_irr_lock;
 	/* Number of bits set in ISR. */
 	s16 isr_count;
 	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c1b3041..c8e6036 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,7 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
-static bool __read_mostly enable_apicv_reg_vid;
+static bool __read_mostly enable_apicv_reg_vid = 1;
+module_param(enable_apicv_reg_vid, bool, S_IRUGO);
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use