From patchwork Mon Jan 28 00:54:07 2013
X-Patchwork-Submitter: "Zhang, Yang Z"
X-Patchwork-Id: 2052831
From: Yang Zhang <yang.z.zhang@intel.com>
To: kvm@vger.kernel.org
Cc: gleb@redhat.com, haitao.shan@intel.com, mtosatti@redhat.com,
    xiantao.zhang@intel.com, hpa@linux.intel.com, jun.nakajima@intel.com,
    Yang Zhang <yang.z.zhang@intel.com>
Subject: [PATCH v3] KVM: VMX: enable acknowledge interrupt on vmexit
Date: Mon, 28 Jan 2013 08:54:07 +0800
Message-Id: <1359334447-28153-1-git-send-email-yang.z.zhang@intel.com>

From: Yang Zhang <yang.z.zhang@intel.com>

The "acknowledge interrupt on exit" feature controls processor behavior
for external interrupt acknowledgement. When this control is set, the
processor acknowledges the interrupt controller to acquire the interrupt
vector upon VM exit.

With this feature enabled, an interrupt that arrives while the target
CPU is running in VMX non-root mode is handled by the VMX exit handler
instead of by the handler in the host IDT. Currently, the VMX handler
only fakes an interrupt stack frame and jumps to the IDT entry so that
the real handler still services the interrupt. As a further step, we
will inspect the vector and deliver through the IDT only those
interrupts that do not belong to the current vcpu; interrupts that do
belong to the current vcpu will be handled entirely inside the VMX
handler. This reduces KVM's interrupt handling cost.

Refer to Intel SDM volume 3, chapter 33.2.
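To make the "fakes an interrupt stack frame" step concrete: on 64-bit
hosts the handler pushes the same five-slot frame the CPU itself would
have pushed for an IDT-delivered interrupt. The struct below is only an
illustration of that layout (it appears in neither this patch nor any
kernel header):

	/* Host stack as seen by the IDT handler, highest address first. */
	struct fake_intr_frame {
		unsigned long ss;	/* saved %ss */
		unsigned long rsp;	/* stack pointer before the pushes */
		unsigned long rflags;	/* saved flags, IF (0x200) forced on */
		unsigned long cs;	/* saved %cs */
		unsigned long rip;	/* return address, loaded from intr_return */
	};

Because the frame matches hardware delivery, the real handler's iret
pops a rip slot holding the address of local label 1 (stored at
intr_return), so execution resumes just past the jmp with interrupts
enabled.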
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
---
 arch/x86/include/asm/kvm_host.h |    2 +
 arch/x86/kvm/svm.c              |    6 ++++
 arch/x86/kvm/vmx.c              |   61 ++++++++++++++++++++++++++++++++++++--
 arch/x86/kvm/x86.c              |    3 +-
 4 files changed, 67 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 77d56a4..07daf10 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -340,6 +340,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr8;
 	u32 hflags;
 	u64 efer;
+	unsigned long host_idt_base;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
 	unsigned long apic_attention;
@@ -725,6 +726,7 @@ struct kvm_x86_ops {
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage);
+	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d29d3cd..c283185 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4227,6 +4227,11 @@ out:
 	return ret;
 }
 
+static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
+{
+	local_irq_enable();
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -4318,6 +4323,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_tdp_cr3 = set_tdp_cr3,
 
 	.check_intercept = svm_check_intercept,
+	.handle_external_intr = svm_handle_external_intr,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 02eeba8..243ce45 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2565,7 +2565,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 #ifdef CONFIG_X86_64
 	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
 #endif
-	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
+	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
+		VM_EXIT_ACK_INTR_ON_EXIT;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
 				&_vmexit_control) < 0)
 		return -EIO;
@@ -3742,7 +3743,7 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
  * Note that host-state that does change is set elsewhere. E.g., host-state
  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
  */
-static void vmx_set_constant_host_state(void)
+static void vmx_set_constant_host_state(struct kvm_vcpu *vcpu)
 {
 	u32 low32, high32;
 	unsigned long tmpl;
@@ -3770,6 +3771,7 @@ static void vmx_set_constant_host_state(void)
 
 	native_store_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
+	vcpu->arch.host_idt_base = dt.address;
 
 	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
@@ -3884,7 +3886,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
 
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(&vmx->vcpu);
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
@@ -6094,6 +6096,56 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 	}
 }
 
+static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+{
+	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+	if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
+			== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
+		unsigned int vector;
+		unsigned long entry;
+		gate_desc *desc;
+
+		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+#ifdef CONFIG_X86_64
+		desc = (void *)vcpu->arch.host_idt_base + vector * 16;
+#else
+		desc = (void *)vcpu->arch.host_idt_base + vector * 8;
+#endif
+
+		entry = gate_offset(*desc);
+		asm(
+			"mov %0, %%" _ASM_DX " \n\t"
+#ifdef CONFIG_X86_64
+			"mov %%" _ASM_SP ", %%" _ASM_BX " \n\t"
+			"and $0xfffffffffffffff0, %%" _ASM_SP " \n\t"
+			"mov %%ss, %%" _ASM_AX " \n\t"
+			"push %%" _ASM_AX " \n\t"
+			"push %%" _ASM_BX " \n\t"
+#endif
+			"pushf \n\t"
+			"pop %%" _ASM_AX " \n\t"
+			"or $0x200, %%" _ASM_AX " \n\t"
+			"push %%" _ASM_AX " \n\t"
+			"mov %%cs, %%" _ASM_AX " \n\t"
+			"push %%" _ASM_AX " \n\t"
+			"push intr_return \n\t"
+			"jmp *%%" _ASM_DX " \n\t"
+			"1: \n\t"
+			".pushsection .rodata \n\t"
+			".global intr_return \n\t"
+			"intr_return: " _ASM_PTR " 1b \n\t"
+			".popsection \n\t"
+			: : "m"(entry) :
+#ifdef CONFIG_X86_64
+			"rax", "rbx", "rdx"
+#else
+			"eax", "edx"
+#endif
+			);
+	} else
+		local_irq_enable();
+}
+
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -6764,7 +6816,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * Other fields are different per CPU, and will be set later when
 	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
 	 */
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(vcpu);
 
 	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
@@ -7361,6 +7413,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_tdp_cr3 = vmx_set_cr3,
 
 	.check_intercept = vmx_check_intercept,
+	.handle_external_intr = vmx_handle_external_intr,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9f5529..676b399 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5767,7 +5767,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
-	local_irq_enable();
+
+	kvm_x86_ops->handle_external_intr(vcpu);
 
 	++vcpu->stat.exits;
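A note on the IDT lookup in vmx_handle_external_intr(): each 64-bit IDT
gate descriptor is 16 bytes (8 bytes on 32-bit), hence the vector * 16
/ vector * 8 indexing, and gate_offset() reassembles the handler
address from the gate's split offset fields. A rough 64-bit equivalent,
written out only as a sketch (example_gate_entry is a hypothetical
name; the field names follow struct gate_struct64 in
arch/x86/include/asm/desc_defs.h):

	/* Illustration only: mirrors gate_offset() for a 64-bit gate. */
	static unsigned long example_gate_entry(const gate_desc *g)
	{
		return (unsigned long)g->offset_low |
		       ((unsigned long)g->offset_middle << 16) |
		       ((unsigned long)g->offset_high << 32);
	}

The assembled address is what the inline asm above jumps to after
building the fake interrupt frame.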