From patchwork Sun Jun 13 12:32:18 2010
From: "Nadav Har'El"
To: avi@redhat.com
Cc: kvm@vger.kernel.org
Date: Sun, 13 Jun 2010 15:32:18 +0300
Message-Id: <201006131232.o5DCWIHl013120@rice.haifa.ibm.com>
References: <1276431753-nyh@il.ibm.com>
Subject: [PATCH 19/24] Deciding if L0 or L1 should handle an L2 exit
X-Patchwork-Id: 105798

--- .before/arch/x86/kvm/vmx.c	2010-06-13 15:01:30.000000000 +0300
+++ .after/arch/x86/kvm/vmx.c	2010-06-13 15:01:30.000000000 +0300
@@ -318,6 +318,8 @@ struct nested_vmx {
 	struct shadow_vmcs *l1_shadow_vmcs;
 	/* Level 1 vmcs loaded into the processor */
 	struct vmcs *l1_vmcs;
+	/* L2 must run next, and mustn't decide to exit to L1. */
+	bool nested_run_pending;
 };
 
 enum vmcs_field_type {
@@ -900,6 +902,24 @@ static inline bool nested_cpu_has_vmx_ep
 }
 
+static inline bool nested_cpu_has_vmx_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+	return get_shadow_vmcs(vcpu)->cpu_based_vm_exec_control &
+		CPU_BASED_USE_MSR_BITMAPS;
+}
+
+static inline bool is_exception(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
+static inline bool is_nmi(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+}
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
@@ -3694,6 +3714,8 @@ static void vmx_set_nmi_mask(struct kvm_
 	}
 }
 
+static int nested_vmx_vmexit(struct kvm_vcpu *vcpu, bool is_interrupt);
+
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
@@ -3819,6 +3841,8 @@ static int handle_exception(struct kvm_v
 
 	if (is_no_device(intr_info)) {
 		vmx_fpu_activate(vcpu);
+		if (vmx->nested.nested_mode)
+			vmx->nested.nested_run_pending = 1;
 		return 1;
 	}
 
@@ -4989,6 +5013,202 @@ static int (*kvm_vmx_exit_handlers[])(st
 static const int kvm_vmx_max_exit_handlers =
 	ARRAY_SIZE(kvm_vmx_exit_handlers);
 
+/* Return 1 if we should exit from L2 to L1 to handle an MSR access exit,
+ * rather than handle it ourselves in L0. I.e., check whether L1's MSR bitmap
+ * expresses interest in the current event (a read or write of this MSR).
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+	struct shadow_vmcs *l2svmcs, u32 exit_code)
+{
+	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+	struct page *msr_bitmap_page;
+	void *va;
+	bool ret;
+
+	if (!cpu_has_vmx_msr_bitmap() || !nested_cpu_has_vmx_msr_bitmap(vcpu))
+		return 1;
+
+	msr_bitmap_page = nested_get_page(vcpu, l2svmcs->msr_bitmap);
+	if (!msr_bitmap_page) {
+		printk(KERN_INFO "%s error in nested_get_page\n", __func__);
+		return 0;
+	}
+
+	va = kmap_atomic(msr_bitmap_page, KM_USER1);
+	if (exit_code == EXIT_REASON_MSR_WRITE)
+		va += 0x800;
+	if (msr_index >= 0xc0000000) {
+		msr_index -= 0xc0000000;
+		va += 0x400;
+	}
+	if (msr_index > 0x1fff) {
+		kunmap_atomic(va, KM_USER1);
+		return 0;
+	}
+	ret = test_bit(msr_index, va);
+	kunmap_atomic(va, KM_USER1);
+	return ret;
+}
+
+/* Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+	struct shadow_vmcs *l2svmcs)
+{
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	int cr = exit_qualification & 15;
+	int reg = (exit_qualification >> 8) & 15;
+	unsigned long val = kvm_register_read(vcpu, reg);
+
+	switch ((exit_qualification >> 4) & 3) {
+	case 0: /* mov to cr */
+		switch (cr) {
+		case 0:
+			if (l2svmcs->cr0_guest_host_mask &
+			    (val ^ l2svmcs->cr0_read_shadow))
+				return 1;
+			break;
+		case 3:
+			if (l2svmcs->cpu_based_vm_exec_control &
+			    CPU_BASED_CR3_LOAD_EXITING)
+				return 1;
+			break;
+		case 4:
+			if (l2svmcs->cr4_guest_host_mask &
+			    (l2svmcs->cr4_read_shadow ^ val))
+				return 1;
+			break;
+		case 8:
+			if (l2svmcs->cpu_based_vm_exec_control &
+			    CPU_BASED_CR8_LOAD_EXITING)
+				return 1;
+			break;
+		}
+		break;
+	case 2: /* clts */
+		if (l2svmcs->cr0_guest_host_mask & X86_CR0_TS)
+			return 1;
+		break;
+	case 1: /* mov from cr */
+		switch (cr) {
+		case 0:
+			return 1;
+		case 3:
+			if (l2svmcs->cpu_based_vm_exec_control &
+			    CPU_BASED_CR3_STORE_EXITING)
+				return 1;
+			break;
+		case 4:
+			return 1;
+			break;
+		case 8:
+			if (l2svmcs->cpu_based_vm_exec_control &
+			    CPU_BASED_CR8_STORE_EXITING)
+				return 1;
+			break;
+		}
+		break;
+	case 3: /* lmsw */
+		if (l2svmcs->cr0_guest_host_mask &
+		    (val ^ l2svmcs->cr0_read_shadow))
+			return 1;
+		break;
+	}
+	return 0;
+}
+
+/* Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * should handle it ourselves in L0. Only call this when in nested_mode (L2).
+ */
+static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu, bool afterexit)
+{
+	u32 exit_code = vmcs_read32(VM_EXIT_REASON);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	struct shadow_vmcs *l2svmcs;
+	int r = 0;
+
+	if (vmx->nested.nested_run_pending)
+		return 0;
+
+	if (unlikely(vmx->fail)) {
+		printk(KERN_INFO "%s failed vm entry %x\n",
+			__func__, vmcs_read32(VM_INSTRUCTION_ERROR));
+		return 1;
+	}
+
+	if (afterexit) {
+		/* There are some events that we should let L1 handle when
+		 * they are being injected (afterexit==0), but that we should
+		 * handle in L0 when they cause an exit (afterexit==1).
+		 */
+		switch (exit_code) {
+		case EXIT_REASON_EXTERNAL_INTERRUPT:
+			return 0;
+		case EXIT_REASON_EXCEPTION_NMI:
+			if (!is_exception(intr_info))
+				return 0;
+			if (is_page_fault(intr_info) && (!enable_ept))
+				return 0;
+			break;
+		case EXIT_REASON_EPT_VIOLATION:
+			if (enable_ept)
+				return 0;
+			break;
+		}
+	}
+
+	if (!nested_map_current(vcpu))
+		return 0;
+	l2svmcs = get_shadow_vmcs(vcpu);
+
+	switch (exit_code) {
+	case EXIT_REASON_INVLPG:
+		if (l2svmcs->cpu_based_vm_exec_control &
+		    CPU_BASED_INVLPG_EXITING)
+			r = 1;
+		break;
+	case EXIT_REASON_MSR_READ:
+	case EXIT_REASON_MSR_WRITE:
+		r = nested_vmx_exit_handled_msr(vcpu, l2svmcs, exit_code);
+		break;
+	case EXIT_REASON_CR_ACCESS:
+		r = nested_vmx_exit_handled_cr(vcpu, l2svmcs);
+		break;
+	case EXIT_REASON_DR_ACCESS:
+		if (l2svmcs->cpu_based_vm_exec_control &
+		    CPU_BASED_MOV_DR_EXITING)
+			r = 1;
+		break;
+	case EXIT_REASON_EXCEPTION_NMI:
+		if (is_external_interrupt(intr_info) &&
+		    (l2svmcs->pin_based_vm_exec_control &
+		     PIN_BASED_EXT_INTR_MASK))
+			r = 1;
+		else if (is_nmi(intr_info) &&
+			 (l2svmcs->pin_based_vm_exec_control &
+			  PIN_BASED_NMI_EXITING))
+			r = 1;
+		else if (is_exception(intr_info) &&
+			 (l2svmcs->exception_bitmap &
+			  (1u << (intr_info & INTR_INFO_VECTOR_MASK))))
+			r = 1;
+		else if (is_page_fault(intr_info))
+			r = 1;
+		break;
+	case EXIT_REASON_EXTERNAL_INTERRUPT:
+		if (l2svmcs->pin_based_vm_exec_control &
+		    PIN_BASED_EXT_INTR_MASK)
+			r = 1;
+		break;
+	default:
+		r = 1;
+	}
+	nested_unmap_current(vcpu);
+
+	return r;
+}
+
 /*
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
@@ -5005,6 +5225,17 @@ static int vmx_handle_exit(struct kvm_vc
 	if (vmx->emulation_required && emulate_invalid_guest_state)
 		return handle_invalid_guest_state(vcpu);
 
+	if (exit_reason == EXIT_REASON_VMLAUNCH ||
+	    exit_reason == EXIT_REASON_VMRESUME)
+		vmx->nested.nested_run_pending = 1;
+	else
+		vmx->nested.nested_run_pending = 0;
+
+	if (vmx->nested.nested_mode && nested_vmx_exit_handled(vcpu, true)) {
+		nested_vmx_vmexit(vcpu, false);
+		return 1;
+	}
+
 	/* Access CR3 don't cause VMExit in paging mode, so we need
 	 * to sync with guest real CR3. */
 	if (enable_ept && is_paging(vcpu))
@@ -5956,6 +6187,7 @@ static int nested_vmx_run(struct kvm_vcp
 	r = kvm_mmu_load(vcpu);
 	if (unlikely(r)) {
 		printk(KERN_ERR "Error in kvm_mmu_load r %d\n", r);
+		nested_vmx_vmexit(vcpu, false);
 		set_rflags_to_vmx_fail_valid(vcpu);
 		/* switch back to L1 */
 		vmx->nested.nested_mode = 0;
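
For reference, the MSR-bitmap walk in nested_vmx_exit_handled_msr() above follows
the VMX layout of the 4 KB bitmap page: reads of MSRs 0x00000000-0x00001fff are
tested at offset 0x000, reads of 0xc0000000-0xc0001fff at 0x400, and the
corresponding write bitmaps at 0x800 and 0xc00. Below is a minimal, self-contained
sketch of just that index calculation on a plain byte buffer; it is not part of
the patch, and the function name is illustrative only.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: mirrors the quadrant selection that
 * nested_vmx_exit_handled_msr() performs on the kmap'ed guest page.
 * 'bitmap' points at L1's 4 KB MSR bitmap. */
static bool l1_wants_msr_exit(const uint8_t *bitmap, uint32_t msr, bool is_write)
{
	uint32_t offset = 0;

	if (is_write)
		offset += 0x800;	/* write bitmaps occupy the second 2 KB */
	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		offset += 0x400;	/* high MSR range: second 1 KB of each half */
	}
	if (msr > 0x1fff)		/* outside both ranges: no exit to L1 */
		return false;

	return bitmap[offset + msr / 8] & (1u << (msr % 8));
}

The kernel code does the same arithmetic directly on the kmap'ed pointer
(va += 0x800, va += 0x400) before calling test_bit().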