From patchwork Thu Jan 27 08:40:06 2011
X-Patchwork-Submitter: Nadav Har'El
X-Patchwork-Id: 510651
Date: Thu, 27 Jan 2011 10:40:06 +0200
Message-Id: <201101270840.p0R8e6CC002680@rice.haifa.ibm.com>
Cc: gleb@redhat.com, avi@redhat.com
To: kvm@vger.kernel.org
From: "Nadav Har'El"
References: <1296116987-nyh@il.ibm.com>
Subject: [PATCH 20/29] nVMX: Deciding if L0 or L1 should handle an L2 exit

--- .before/arch/x86/kvm/vmx.c	2011-01-26 18:06:05.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c	2011-01-26 18:06:05.000000000 +0200
@@ -345,6 +345,8 @@ struct nested_vmx {
 	/* Saving the VMCS that we used for running L1 */
 	struct saved_vmcs saved_vmcs01;
 	struct vmcs_fields *vmcs01_fields;
+	/* L2 must run next, and mustn't decide to exit to L1. */
+	bool nested_run_pending;
 };

 struct vcpu_vmx {
@@ -846,6 +848,20 @@ static inline bool nested_vm_need_virtua
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
 }

+static inline bool nested_cpu_has_vmx_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+	return get_vmcs12_fields(vcpu)->cpu_based_vm_exec_control &
+		CPU_BASED_USE_MSR_BITMAPS;
+}
+
+static inline bool is_exception(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
+static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, bool is_interrupt);
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
@@ -5024,6 +5040,197 @@ static int (*kvm_vmx_exit_handlers[])(st
 static const int kvm_vmx_max_exit_handlers =
 	ARRAY_SIZE(kvm_vmx_exit_handlers);

+/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0. I.e., check in L1's MSR bitmap
+ * whether it expressed interest in the current event (a read or write of
+ * a specific MSR).
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+	struct vmcs_fields *vmcs12, u32 exit_reason)
+{
+	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+	struct page *msr_bitmap_page;
+	void *va;
+	bool ret;
+
+	if (!cpu_has_vmx_msr_bitmap() || !nested_cpu_has_vmx_msr_bitmap(vcpu))
+		return 1;
+
+	msr_bitmap_page = nested_get_page(vcpu, vmcs12->msr_bitmap);
+	if (!msr_bitmap_page) {
+		printk(KERN_INFO "%s error in nested_get_page\n", __func__);
+		return 0;
+	}
+
+	va = kmap_atomic(msr_bitmap_page, KM_USER1);
+	if (exit_reason == EXIT_REASON_MSR_WRITE)
+		va += 0x800;
+	if (msr_index >= 0xc0000000) {
+		msr_index -= 0xc0000000;
+		va += 0x400;
+	}
+	if (msr_index <= 0x1fff)
+		ret = test_bit(msr_index, va);
+	else
+		ret = 1; /* let L1 handle the wrong parameter */
+	kunmap_atomic(va, KM_USER1);
+	nested_release_page_clean(msr_bitmap_page);
+	return ret;
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+	struct vmcs_fields *vmcs12)
+{
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	int cr = exit_qualification & 15;
+	int reg = (exit_qualification >> 8) & 15;
+	unsigned long val = kvm_register_read(vcpu, reg);
+
+	switch ((exit_qualification >> 4) & 3) {
+	case 0: /* mov to cr */
+		switch (cr) {
+		case 0:
+			if (vmcs12->cr0_guest_host_mask &
+			    (val ^ vmcs12->cr0_read_shadow))
+				return 1;
+			break;
+		case 3:
+			if ((vmcs12->cr3_target_count >= 1 &&
+					vmcs12->cr3_target_value0 == val) ||
+				(vmcs12->cr3_target_count >= 2 &&
+					vmcs12->cr3_target_value1 == val) ||
+				(vmcs12->cr3_target_count >= 3 &&
+					vmcs12->cr3_target_value2 == val) ||
+				(vmcs12->cr3_target_count >= 4 &&
+					vmcs12->cr3_target_value3 == val))
+				return 0;
+			if (nested_cpu_has_secondary_exec_ctrls(vcpu) &&
+				(vmcs12->cpu_based_vm_exec_control &
+					CPU_BASED_CR3_LOAD_EXITING)) {
+				return 1;
+			}
+			break;
+		case 4:
+			if (vmcs12->cr4_guest_host_mask &
+			    (vmcs12->cr4_read_shadow ^ val))
+				return 1;
+			break;
+		case 8:
+			if (nested_cpu_has_secondary_exec_ctrls(vcpu) &&
+				(vmcs12->cpu_based_vm_exec_control &
+					CPU_BASED_CR8_LOAD_EXITING))
+				return 1;
+			/*
+			 * TODO: missing else if control & CPU_BASED_TPR_SHADOW
+			 * then set tpr shadow and if below tpr_threshold, exit.
+ */ + break; + } + break; + case 2: /* clts */ + if (vmcs12->cr0_guest_host_mask & X86_CR0_TS) + return 1; + break; + case 1: /* mov from cr */ + switch (cr) { + case 0: + return 1; + case 3: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR3_STORE_EXITING) + return 1; + break; + case 4: + return 1; + break; + case 8: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR8_STORE_EXITING) + return 1; + break; + } + break; + case 3: /* lmsw */ + /* + * lmsw can change bits 1..3 of cr0, and only set bit 0 of + * cr0. Other attempted changes are ignored, with no exit. + */ + if (vmcs12->cr0_guest_host_mask & 0xe & + (val ^ vmcs12->cr0_read_shadow)) + return 1; + if ((vmcs12->cr0_guest_host_mask & 0x1) && + !(vmcs12->cr0_read_shadow & 0x1) && + (val & 0x1)) + return 1; + break; + } + return 0; +} + +/* + * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we + * should handle it ourselves in L0 (and then continue L2). Only call this + * when in is_guest_mode (L2). + */ +static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) +{ + u32 exit_reason = vmcs_read32(VM_EXIT_REASON); + u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs_fields *vmcs12 = get_vmcs12_fields(vcpu); + + if (vmx->nested.nested_run_pending) + return 0; + + if (unlikely(vmx->fail)) { + printk(KERN_INFO "%s failed vm entry %x\n", + __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); + return 1; + } + + switch (exit_reason) { + case EXIT_REASON_EXTERNAL_INTERRUPT: + return 0; + case EXIT_REASON_EXCEPTION_NMI: + if (!is_exception(intr_info)) + return 0; + else if (is_page_fault(intr_info)) + return enable_ept; + return vmcs12->exception_bitmap & + (1u << (intr_info & INTR_INFO_VECTOR_MASK)); + case EXIT_REASON_EPT_VIOLATION: + return 0; + case EXIT_REASON_INVLPG: + return vmcs12->cpu_based_vm_exec_control & + CPU_BASED_INVLPG_EXITING; + case EXIT_REASON_MSR_READ: + case EXIT_REASON_MSR_WRITE: + return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); + case EXIT_REASON_CR_ACCESS: + return nested_vmx_exit_handled_cr(vcpu, vmcs12); + case EXIT_REASON_DR_ACCESS: + return vmcs12->cpu_based_vm_exec_control & + CPU_BASED_MOV_DR_EXITING; + default: + /* + * One particularly interesting case that is covered here is an + * exit caused by L2 running a VMX instruction. L2 is guest + * mode in L1's world, and according to the VMX spec running a + * VMX instruction in guest mode should cause an exit to root + * mode, i.e., to L1. This is why we need to return r=1 for + * those exit reasons too. This enables further nesting: Like + * L0 emulates VMX for L1, we now allow L1 to emulate VMX for + * L2, who will then be able to run L3. 
+		 */
+		return 1;
+	}
+}
+
 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 {
 	*info1 = vmcs_readl(EXIT_QUALIFICATION);
@@ -5046,6 +5253,17 @@ static int vmx_handle_exit(struct kvm_vc
 	if (vmx->emulation_required && emulate_invalid_guest_state)
 		return handle_invalid_guest_state(vcpu);

+	if (exit_reason == EXIT_REASON_VMLAUNCH ||
+	    exit_reason == EXIT_REASON_VMRESUME)
+		vmx->nested.nested_run_pending = 1;
+	else
+		vmx->nested.nested_run_pending = 0;
+
+	if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
+		nested_vmx_vmexit(vcpu, false);
+		return 1;
+	}
+
 	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -6121,6 +6339,8 @@ static int nested_vmx_run(struct kvm_vcp
 	kvm_mmu_reset_context(vcpu);

 	if (unlikely(kvm_mmu_load(vcpu))) {
+		/* switch back to L1 */
+		nested_vmx_vmexit(vcpu, false);
 		/*
 		 * TODO: there is no reasonable error number to use.
 		 * perhaps a more reasonable thing to do is to
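
A side note for readers following the MSR-bitmap logic above: the lookup in
nested_vmx_exit_handled_msr() is essentially an offset computation into the
4 KB VMX MSR bitmap (write bitmaps at +0x800, the "high" MSRs starting at
0xc0000000 at +0x400, one bit per MSR index up to 0x1fff). The stand-alone
user-space sketch below mirrors that computation; it is not part of the patch,
the name msr_bitmap_wants_exit() is purely illustrative, and the byte-wise bit
test stands in for the test_bit()/kmap_atomic() access to the guest page that
the kernel code performs.

/*
 * Illustrative sketch only: given an MSR index and an access type, compute
 * which bit of a 4 KB VMX-style MSR bitmap decides whether the access should
 * cause an exit, mirroring nested_vmx_exit_handled_msr() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool msr_bitmap_wants_exit(const uint8_t *bitmap, uint32_t msr,
				  bool is_write)
{
	uint32_t base = is_write ? 0x800 : 0x000;	/* write bitmaps at +0x800 */

	if (msr >= 0xc0000000) {			/* "high" MSR range */
		msr -= 0xc0000000;
		base += 0x400;
	}
	if (msr > 0x1fff)				/* out of range: let L1 handle it */
		return true;
	return bitmap[base + msr / 8] & (1u << (msr % 8));
}

int main(void)
{
	static uint8_t bitmap[4096];			/* stands in for the vmcs12->msr_bitmap page */

	/* Mark "write to MSR 0x174 (IA32_SYSENTER_CS)" as intercepted. */
	bitmap[0x800 + 0x174 / 8] |= 1u << (0x174 % 8);

	printf("write 0x174 exits: %d\n", msr_bitmap_wants_exit(bitmap, 0x174, true));
	printf("read  0x174 exits: %d\n", msr_bitmap_wants_exit(bitmap, 0x174, false));
	return 0;
}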