From patchwork Wed Nov 21 09:04:37 2012
X-Patchwork-Submitter: "Xu, Dongxiao"
X-Patchwork-Id: 1779001
From: Dongxiao Xu
To: kvm@vger.kernel.org
Cc: mtosatti@redhat.com, gleb@redhat.com
Subject: [PATCH 4/4] nested vmx: use a list to store the launched vmcs12 for L1 VMM
Date: Wed, 21 Nov 2012 17:04:37 +0800
Message-Id: <1353488677-29937-5-git-send-email-dongxiao.xu@intel.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1353488677-29937-1-git-send-email-dongxiao.xu@intel.com>
References: <1353488677-29937-1-git-send-email-dongxiao.xu@intel.com>
X-Mailing-List: kvm@vger.kernel.org

The launch state is not a member of the VMCS area; use a separate
variable (a list) to store it instead.

Signed-off-by: Dongxiao Xu
---
 arch/x86/kvm/vmx.c |   86 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6687fb6..d03ab4e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -177,8 +177,7 @@ struct __packed vmcs12 {
 	u32 revision_id;
 	u32 abort;
 
-	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
-	u32 padding[7]; /* room for future expansion */
+	u32 padding[8]; /* room for future expansion */
 
 	u64 io_bitmap_a;
 	u64 io_bitmap_b;
@@ -339,6 +338,11 @@ struct vmcs02_list {
 	struct loaded_vmcs vmcs02;
 };
 
+struct vmcs12_list {
+	unsigned long vmcs12_pa;
+	struct list_head node;
+};
+
 /*
  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
  */
@@ -364,6 +368,8 @@ struct nested_vmx {
 	 * we must keep them pinned while L2 runs.
 	 */
 	struct page *apic_access_page;
+	/* vmcs12_pool contains the launched vmcs12. */
+	struct list_head vmcs12_pool;
 };
 
 struct vcpu_vmx {
@@ -614,6 +620,58 @@ static void nested_release_page_clean(struct page *page)
 	kvm_release_page_clean(page);
 }
 
+static int vmcs12_launched(struct list_head *vmcs12_pool,
+			   unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *iter;
+	struct list_head *pos;
+	int launched = 0;
+
+	list_for_each(pos, vmcs12_pool) {
+		iter = list_entry(pos, struct vmcs12_list, node);
+		if (vmcs12_pa == iter->vmcs12_pa) {
+			launched = 1;
+			break;
+		}
+	}
+
+	return launched;
+}
+
+static int set_vmcs12_launched(struct list_head *vmcs12_pool,
+			       unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *vmcs12;
+
+	if (vmcs12_launched(vmcs12_pool, vmcs12_pa))
+		return 0;
+
+	vmcs12 = kzalloc(sizeof(struct vmcs12_list), GFP_KERNEL);
+	if (!vmcs12)
+		return -ENOMEM;
+
+	vmcs12->vmcs12_pa = vmcs12_pa;
+	list_add(&vmcs12->node, vmcs12_pool);
+
+	return 0;
+}
+
+static void clear_vmcs12_launched(struct list_head *vmcs12_pool,
+				  unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *iter;
+	struct list_head *pos;
+
+	list_for_each(pos, vmcs12_pool) {
+		iter = list_entry(pos, struct vmcs12_list, node);
+		if (vmcs12_pa == iter->vmcs12_pa) {
+			list_del(&iter->node);
+			kfree(iter);
+			break;
+		}
+	}
+}
+
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
@@ -5111,6 +5169,18 @@ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
 }
 
 /*
+ * Free the vmcs12 list.
+ */
+static void nested_free_vmcs12_list(struct vcpu_vmx *vmx)
+{
+	struct vmcs12_list *item, *n;
+	list_for_each_entry_safe(item, n, &vmx->nested.vmcs12_pool, node) {
+		list_del(&item->node);
+		kfree(item);
+	}
+}
+
+/*
  * Emulate the VMXON instruction.
  * Currently, we just remember that VMX is active, and do not save or even
  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
@@ -5207,6 +5277,7 @@ static void free_nested(struct vcpu_vmx *vmx)
 	}
 
 	nested_free_all_saved_vmcss(vmx);
+	nested_free_vmcs12_list(vmx);
 }
 
 /* Emulate the VMXOFF instruction */
@@ -5359,7 +5430,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 	vmcs12 = kmap(page);
-	vmcs12->launch_state = 0;
+	clear_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
 	kunmap(page);
 	nested_release_page(page);
 
@@ -6467,6 +6538,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->nested.current_vmptr = -1ull;
 	vmx->nested.current_vmcs12 = NULL;
+	INIT_LIST_HEAD(&vmx->nested.vmcs12_pool);
 
 	return &vmx->vcpu;
 
@@ -6846,6 +6918,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu;
 	struct loaded_vmcs *vmcs02;
+	int is_launched;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -6864,7 +6937,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * for misconfigurations which will anyway be caught by the processor
 	 * when using the merged vmcs02.
 	 */
-	if (vmcs12->launch_state == launch) {
+	is_launched =
+		vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
+	if (is_launched == launch) {
 		nested_vmx_failValid(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
@@ -6953,7 +7028,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	vcpu->cpu = cpu;
 	put_cpu();
 
-	vmcs12->launch_state = 1;
+	if (set_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12)) < 0)
+		return -ENOMEM;
 
 	prepare_vmcs02(vcpu);
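
[Editor's note, not part of the patch: for readers unfamiliar with the kernel's list API, below is a minimal, self-contained userspace sketch of the bookkeeping idea the patch implements: tracking which vmcs12 physical addresses are in the "launched" state in a side list keyed by address, rather than in a field inside the VMCS region. It is an illustration only; a plain singly linked list and libc allocation stand in for struct list_head, kzalloc() and kfree(), and all names here (launched_node, is_launched, and so on) are made up for the example.]

/*
 * Userspace sketch of the launched-vmcs12 tracking scheme.
 * Build with: cc -Wall -o launched_sketch launched_sketch.c
 */
#include <stdio.h>
#include <stdlib.h>

struct launched_node {
	unsigned long pa;             /* physical address of a vmcs12 */
	struct launched_node *next;
};

/* Return 1 if @pa is recorded as launched, 0 otherwise. */
static int is_launched(struct launched_node *head, unsigned long pa)
{
	for (; head; head = head->next)
		if (head->pa == pa)
			return 1;
	return 0;
}

/* Record @pa as launched (VMLAUNCH); no-op if already present. */
static int set_launched(struct launched_node **head, unsigned long pa)
{
	struct launched_node *n;

	if (is_launched(*head, pa))
		return 0;

	n = calloc(1, sizeof(*n));
	if (!n)
		return -1;

	n->pa = pa;
	n->next = *head;
	*head = n;
	return 0;
}

/* Forget @pa (VMCLEAR); no-op if it was never recorded. */
static void clear_launched(struct launched_node **head, unsigned long pa)
{
	struct launched_node **pp, *victim;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->pa == pa) {
			victim = *pp;
			*pp = victim->next;
			free(victim);
			return;
		}
	}
}

int main(void)
{
	struct launched_node *pool = NULL;

	set_launched(&pool, 0x1000);   /* emulated VMLAUNCH of vmcs12 at 0x1000 */
	printf("0x1000 launched: %d\n", is_launched(pool, 0x1000)); /* prints 1 */
	clear_launched(&pool, 0x1000); /* emulated VMCLEAR */
	printf("0x1000 launched: %d\n", is_launched(pool, 0x1000)); /* prints 0 */
	return 0;
}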