From patchwork Mon Mar 18 12:47:22 2013
From: Yang Zhang <yang.z.zhang@intel.com>
To: kvm@vger.kernel.org
Cc: gleb@redhat.com, mtosatti@redhat.com, xiantao.zhang@intel.com,
	Yang Zhang <yang.z.zhang@intel.com>
Subject: [PATCH v3 8/8] KVM: Use eoi to track RTC interrupt delivery status
Date: Mon, 18 Mar 2013 20:47:22 +0800
Message-Id: <1363610842-5878-9-git-send-email-yang.z.zhang@intel.com>
In-Reply-To: <1363610842-5878-1-git-send-email-yang.z.zhang@intel.com>
References: <1363610842-5878-1-git-send-email-yang.z.zhang@intel.com>

From: Yang Zhang <yang.z.zhang@intel.com>

The current interrupt coalescing logic, which is used only by the RTC,
conflicts with posted interrupts. This patch introduces a new mechanism
that uses EOI to track interrupt delivery: when an RTC interrupt is
delivered, need_eoi is set to the number of vcpus that received it, and
it is decremented each time one of those vcpus writes EOI. No
subsequent RTC interrupt can be delivered until all of them have
written EOI.
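To make the bookkeeping easier to follow, here is a minimal user-space
sketch of the scheme described above. It is illustrative only and not
part of the patch; MAX_VCPUS and the struct layout are simplified
stand-ins for the kernel's KVM_MAX_VCPUS and rtc_status.

    /* Illustrative user-space model of the need_eoi scheme; not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_VCPUS 8 /* stand-in for KVM_MAX_VCPUS */

    struct rtc_status {
            int need_eoi;                   /* EOIs still outstanding */
            bool expected_eoi[MAX_VCPUS];   /* stand-in for the bitmap */
    };

    /* On delivery: record which vcpus received the interrupt. */
    static void deliver(struct rtc_status *s, const bool dest[MAX_VCPUS])
    {
            int i;

            s->need_eoi = 0;
            for (i = 0; i < MAX_VCPUS; i++) {
                    s->expected_eoi[i] = dest[i];
                    if (dest[i])
                            s->need_eoi++;
            }
    }

    /* On EOI: drop the vcpu from the expected set and decrement. */
    static void ack_eoi(struct rtc_status *s, int vcpu_id)
    {
            if (s->expected_eoi[vcpu_id]) {
                    s->expected_eoi[vcpu_id] = false;
                    --s->need_eoi;
            }
    }

    /* A new RTC interrupt is coalesced while any EOI is outstanding. */
    static bool coalesced(const struct rtc_status *s)
    {
            return s->need_eoi > 0;
    }

    int main(void)
    {
            struct rtc_status s = { 0 };
            bool dest[MAX_VCPUS] = { true, true }; /* deliver to vcpu 0 and 1 */

            deliver(&s, dest);
            printf("coalesced: %d\n", coalesced(&s)); /* 1: EOIs outstanding */
            ack_eoi(&s, 0);
            ack_eoi(&s, 1);
            printf("coalesced: %d\n", coalesced(&s)); /* 0: may deliver again */
            return 0;
    }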
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
---
 virt/kvm/ioapic.c |   76 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 74 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 4ddaa07..35b3d8c 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -103,7 +103,8 @@ static void rtc_irq_restore(struct kvm_ioapic *ioapic)
 	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
 		if (kvm_apic_pending_eoi(vcpu, vector)) {
 			need_eoi++;
-			set_bit(vcpu->vcpu_id, ioapic->rtc_status.vcpu_map);
+			set_bit(vcpu->vcpu_id,
+				ioapic->rtc_status.expected_eoi_bitmap);
 		}
 	}
 	ioapic->rtc_status.need_eoi = need_eoi;
@@ -132,6 +133,50 @@ static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
 	spin_unlock(&ioapic->lock);
 }
 
+static void rtc_irq_set_eoi(struct kvm_ioapic *ioapic, int irq)
+{
+	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+
+	if (irq != 8)
+		return;
+
+	if (likely(!bitmap_empty(ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS))) {
+		if (entry->fields.delivery_mode == APIC_DM_LOWEST)
+			ioapic->rtc_status.need_eoi = 1;
+		else {
+			int weight;
+			weight = bitmap_weight(ioapic->rtc_status.vcpu_map,
+					KVM_MAX_VCPUS);
+			ioapic->rtc_status.need_eoi = weight;
+		}
+		bitmap_copy(ioapic->rtc_status.expected_eoi_bitmap,
+				ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS);
+	}
+}
+
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+			struct rtc_status *rtc_status, int irq)
+{
+	if (irq != 8)
+		return;
+
+	if (test_and_clear_bit(vcpu->vcpu_id, rtc_status->expected_eoi_bitmap))
+		--rtc_status->need_eoi;
+
+	WARN_ON(rtc_status->need_eoi < 0);
+}
+
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+	if (irq != 8)
+		return false;
+
+	if (ioapic->rtc_status.need_eoi > 0)
+		return true; /* coalesced */
+
+	return false;
+}
+
 #else
 
 static void rtc_irq_reset(struct kvm_ioapic *ioapic)
@@ -148,6 +193,22 @@ static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
 {
 	return;
 }
+
+static void rtc_irq_set_eoi(struct kvm_ioapic *ioapic, int irq)
+{
+	return;
+}
+
+static void rtc_irq_ack_eoi(struct kvm_vcpu *vcpu,
+			struct rtc_status *rtc_status, int irq)
+{
+	return;
+}
+
+static bool rtc_irq_check(struct kvm_ioapic *ioapic, int irq)
+{
+	return false;
+}
 #endif
 
 static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
@@ -260,7 +321,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
 		    && ioapic->irr & (1 << index))
 			ioapic_service(ioapic, index);
+		spin_unlock(&ioapic->lock);
 		kvm_scan_ioapic_entry(ioapic->kvm);
+		spin_lock(&ioapic->lock);
 		break;
 	}
 }
@@ -284,6 +347,8 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	irqe.level = 1;
 	irqe.shorthand = 0;
 
+	rtc_irq_set_eoi(ioapic, irq);
+
 	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
 }
 
@@ -308,6 +373,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		ret = 1;
 	} else {
 		int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
+		if (rtc_irq_check(ioapic, irq)) {
+			ret = 0; /* coalesced */
+			goto out;
+		}
 		ioapic->irr |= mask;
 		if ((edge && old_irr != ioapic->irr) ||
 		    (!edge && !entry.fields.remote_irr))
@@ -315,6 +385,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		else
 			ret = 0; /* report coalesced interrupt */
 	}
+out:
 	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	spin_unlock(&ioapic->lock);
 
@@ -342,6 +413,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		if (ent->fields.vector != vector)
 			continue;
 
+		rtc_irq_ack_eoi(vcpu, &ioapic->rtc_status, i);
 		/*
 		 * We are dropping lock while calling ack notifiers because ack
 		 * notifier callbacks for assigned devices call into IOAPIC
@@ -562,7 +634,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
 	update_handled_vectors(ioapic);
 	rtc_irq_restore(ioapic);
-	kvm_scan_ioapic_entry(kvm);
 	spin_unlock(&ioapic->lock);
+	kvm_scan_ioapic_entry(kvm);
 	return 0;
 }
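A note on the two kvm_scan_ioapic_entry() hunks: in both
ioapic_write_indirect() and kvm_set_ioapic(), the rescan is now invoked
only after ioapic->lock has been dropped. Below is a minimal user-space
analogue of that pattern, illustrative only; the names mirror the
patch, and a pthread mutex stands in for the kernel spinlock. Calling
the rescan helper with the lock still held would self-deadlock here,
which is the hazard the reordering avoids.

    /* Illustrative lock-ordering sketch; not kernel code. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ioapic_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for kvm_scan_ioapic_entry(): takes ioapic_lock itself,
     * so it must never be called with the lock already held. */
    static void scan_ioapic_entry(void)
    {
            pthread_mutex_lock(&ioapic_lock);
            puts("rescanning ioapic entries");
            pthread_mutex_unlock(&ioapic_lock);
    }

    static void write_redirect_entry(void)
    {
            pthread_mutex_lock(&ioapic_lock);
            puts("updating redirection table");
            pthread_mutex_unlock(&ioapic_lock); /* drop the lock first... */
            scan_ioapic_entry();                /* ...then rescan safely */
    }

    int main(void)
    {
            write_redirect_entry();
            return 0;
    }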