From patchwork Wed Dec 8 17:10:12 2010
X-Patchwork-Submitter: Nadav Har'El
X-Patchwork-Id: 391232
Date: Wed, 8 Dec 2010 19:10:12 +0200
Message-Id: <201012081710.oB8HACRl008768@rice.haifa.ibm.com>
From: "Nadav Har'El"
To: kvm@vger.kernel.org
Cc: gleb@redhat.com, avi@redhat.com
References: <1291827596-nyh@il.ibm.com>
Subject: [PATCH 20/28] nVMX: Deciding if L0 or L1 should handle an L2 exit

--- .before/arch/x86/kvm/vmx.c	2010-12-08 18:56:51.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c	2010-12-08 18:56:51.000000000 +0200
@@ -333,6 +333,8 @@ struct nested_vmx {
 	struct vmcs_fields *vmcs01_fields;
 	/* Saving some vcpu->arch.* data we had for L1, while running L2 */
 	unsigned long l1_arch_cr3;
+	/* L2 must run next, and mustn't decide to exit to L1. */
+	bool nested_run_pending;
 };
 
 struct vcpu_vmx {
@@ -845,6 +847,20 @@ static inline bool nested_vm_need_virtua
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
 }
 
+static inline bool nested_cpu_has_vmx_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+	return get_vmcs12_fields(vcpu)->cpu_based_vm_exec_control &
+		CPU_BASED_USE_MSR_BITMAPS;
+}
+
+static inline bool is_exception(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
+static int nested_vmx_vmexit(struct kvm_vcpu *vcpu, bool is_interrupt);
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
@@ -4894,6 +4910,195 @@ static int (*kvm_vmx_exit_handlers[])(st
 static const int kvm_vmx_max_exit_handlers =
 	ARRAY_SIZE(kvm_vmx_exit_handlers);
 
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0, i.e., check L1's MSR bitmap to see
+ * whether it expressed interest in the current event (a read or write of a
+ * specific MSR).
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+	struct vmcs_fields *vmcs12, u32 exit_reason)
+{
+	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+	struct page *msr_bitmap_page;
+	void *va;
+	bool ret;
+
+	if (!cpu_has_vmx_msr_bitmap() || !nested_cpu_has_vmx_msr_bitmap(vcpu))
+		return 1;
+
+	msr_bitmap_page = nested_get_page(vcpu, vmcs12->msr_bitmap);
+	if (!msr_bitmap_page) {
+		printk(KERN_INFO "%s error in nested_get_page\n", __func__);
+		return 0;
+	}
+
+	va = kmap_atomic(msr_bitmap_page, KM_USER1);
+	if (exit_reason == EXIT_REASON_MSR_WRITE)
+		va += 0x800;
+	if (msr_index >= 0xc0000000) {
+		msr_index -= 0xc0000000;
+		va += 0x400;
+	}
+	if (msr_index > 0x1fff) {
+		kunmap_atomic(va, KM_USER1);
+		return 0;
+	}
+	ret = test_bit(msr_index, va);
+	kunmap_atomic(va, KM_USER1);
+	return ret;
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0, i.e., check whether L1 wanted to
+ * intercept the current event (via guest_host_mask etc.).
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+	struct vmcs_fields *vmcs12)
+{
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	int cr = exit_qualification & 15;
+	int reg = (exit_qualification >> 8) & 15;
+	unsigned long val = kvm_register_read(vcpu, reg);
+
+	switch ((exit_qualification >> 4) & 3) {
+	case 0: /* mov to cr */
+		switch (cr) {
+		case 0:
+			if (vmcs12->cr0_guest_host_mask &
+			    (val ^ vmcs12->cr0_read_shadow))
+				return 1;
+			break;
+		case 3:
+			if ((vmcs12->cr3_target_count >= 1 &&
+			     vmcs12->cr3_target_value0 == val) ||
+			    (vmcs12->cr3_target_count >= 2 &&
+			     vmcs12->cr3_target_value1 == val) ||
+			    (vmcs12->cr3_target_count >= 3 &&
+			     vmcs12->cr3_target_value2 == val) ||
+			    (vmcs12->cr3_target_count >= 4 &&
+			     vmcs12->cr3_target_value3 == val))
+				return 0;
+			if (nested_cpu_has_secondary_exec_ctrls(vcpu) &&
+			    (vmcs12->cpu_based_vm_exec_control &
+			     CPU_BASED_CR3_LOAD_EXITING)) {
+				return 1;
+			}
+			break;
+		case 4:
+			if (vmcs12->cr4_guest_host_mask &
+			    (vmcs12->cr4_read_shadow ^ val))
+				return 1;
+			break;
+		case 8:
+			if (nested_cpu_has_secondary_exec_ctrls(vcpu) &&
+			    (vmcs12->cpu_based_vm_exec_control &
+			     CPU_BASED_CR8_LOAD_EXITING))
+				return 1;
+			/*
+			 * TODO: missing else if control & CPU_BASED_TPR_SHADOW
+			 * then set tpr shadow and if below tpr_threshold, exit.
+ */ + break; + } + break; + case 2: /* clts */ + if (vmcs12->cr0_guest_host_mask & X86_CR0_TS) + return 1; + break; + case 1: /* mov from cr */ + switch (cr) { + case 0: + return 1; + case 3: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR3_STORE_EXITING) + return 1; + break; + case 4: + return 1; + break; + case 8: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR8_STORE_EXITING) + return 1; + break; + } + break; + case 3: /* lmsw */ + /* + * lmsw can change bits 1..3 of cr0, and only set bit 0 of + * cr0. Other attempted changes are ignored, with no exit. + */ + if (vmcs12->cr0_guest_host_mask & 0xe & + (val ^ vmcs12->cr0_read_shadow)) + return 1; + if ((vmcs12->cr0_guest_host_mask & 0x1) && + !(vmcs12->cr0_read_shadow & 0x1) && + (val & 0x1)) + return 1; + break; + } + return 0; +} + +/* + * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we + * should handle it ourselves in L0 (and then continue L2). Only call this + * when in is_guest_mode (L2). + */ +static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) +{ + u32 exit_reason = vmcs_read32(VM_EXIT_REASON); + u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs_fields *vmcs12 = get_vmcs12_fields(vcpu); + + if (vmx->nested.nested_run_pending) + return 0; + + if (unlikely(vmx->fail)) { + printk(KERN_INFO "%s failed vm entry %x\n", + __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); + return 1; + } + + switch (exit_reason) { + case EXIT_REASON_EXTERNAL_INTERRUPT: + return 0; + case EXIT_REASON_EXCEPTION_NMI: + if (!is_exception(intr_info)) + return 0; + else if (is_page_fault(intr_info)) + return enable_ept; + return vmcs12->exception_bitmap & + (1u << (intr_info & INTR_INFO_VECTOR_MASK)); + case EXIT_REASON_EPT_VIOLATION: + return 0; + case EXIT_REASON_INVLPG: + return vmcs12->cpu_based_vm_exec_control & + CPU_BASED_INVLPG_EXITING; + case EXIT_REASON_MSR_READ: + case EXIT_REASON_MSR_WRITE: + return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); + case EXIT_REASON_CR_ACCESS: + return nested_vmx_exit_handled_cr(vcpu, vmcs12); + case EXIT_REASON_DR_ACCESS: + return vmcs12->cpu_based_vm_exec_control & + CPU_BASED_MOV_DR_EXITING; + default: + /* + * One particularly interesting case that is covered here is an + * exit caused by L2 running a VMX instruction. L2 is guest + * mode in L1's world, and according to the VMX spec running a + * VMX instruction in guest mode should cause an exit to root + * mode, i.e., to L1. This is why we need to return r=1 for + * those exit reasons too. This enables further nesting: Like + * L0 emulates VMX for L1, we now allow L1 to emulate VMX for + * L2, who will then be able to run L3. 
+ */ + return 1; + } +} + static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { *info1 = vmcs_readl(EXIT_QUALIFICATION); @@ -4921,6 +5126,17 @@ static int vmx_handle_exit(struct kvm_vc if (enable_ept && is_paging(vcpu)) vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + if (exit_reason == EXIT_REASON_VMLAUNCH || + exit_reason == EXIT_REASON_VMRESUME) + vmx->nested.nested_run_pending = 1; + else + vmx->nested.nested_run_pending = 0; + + if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { + nested_vmx_vmexit(vcpu, false); + return 1; + } + if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason @@ -5981,6 +6197,7 @@ static int nested_vmx_run(struct kvm_vcp kvm_mmu_reset_context(vcpu); if (unlikely(kvm_mmu_load(vcpu))) { + nested_vmx_vmexit(vcpu, false); nested_vmx_failValid(vcpu, VMXERR_VMRESUME_CORRUPTED_VMCS /* ? */); /* switch back to L1 */