From patchwork Thu Nov 22 04:51:59 2012
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Xu, Dongxiao"
X-Patchwork-Id: 1784111
From: Dongxiao Xu
To: kvm@vger.kernel.org
Cc: mtosatti@redhat.com, gleb@redhat.com
Subject: [PATCH v2 4/4] nested vmx: use a list to store the launched vmcs12 for L1 VMM
Date: Thu, 22 Nov 2012 12:51:59 +0800
Message-Id: <1353559919-29439-5-git-send-email-dongxiao.xu@intel.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1353559919-29439-1-git-send-email-dongxiao.xu@intel.com>
References: <1353559919-29439-1-git-send-email-dongxiao.xu@intel.com>
X-Mailing-List: kvm@vger.kernel.org

The launch state is not a member of the VMCS area; use a separate
variable (a list) to store it instead.

Signed-off-by: Dongxiao Xu
---
 arch/x86/kvm/vmx.c |   86 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 20de88b..3be9265 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -177,8 +177,7 @@ struct __packed vmcs12 {
 	u32 revision_id;
 	u32 abort;
 
-	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
-	u32 padding[7]; /* room for future expansion */
+	u32 padding[8]; /* room for future expansion */
 
 	u64 io_bitmap_a;
 	u64 io_bitmap_b;
@@ -339,6 +338,11 @@ struct vmcs02_list {
 	struct loaded_vmcs vmcs02;
 };
 
+struct vmcs12_list {
+	unsigned long vmcs12_pa;
+	struct list_head node;
+};
+
 /*
  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -364,6 +368,8 @@ struct nested_vmx {
 	 * we must keep them pinned while L2 runs.
 	 */
 	struct page *apic_access_page;
+	/* vmcs12_pool contains the launched vmcs12. */
+	struct list_head vmcs12_pool;
 };
 
 struct vcpu_vmx {
@@ -619,6 +625,58 @@ static void nested_release_page_clean(struct page *page)
 	kvm_release_page_clean(page);
 }
 
+static int vmcs12_launched(struct list_head *vmcs12_pool,
+			   unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *iter;
+	struct list_head *pos;
+	int launched = 0;
+
+	list_for_each(pos, vmcs12_pool) {
+		iter = list_entry(pos, struct vmcs12_list, node);
+		if (vmcs12_pa == iter->vmcs12_pa) {
+			launched = 1;
+			break;
+		}
+	}
+
+	return launched;
+}
+
+static int set_vmcs12_launched(struct list_head *vmcs12_pool,
+			       unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *vmcs12;
+
+	if (vmcs12_launched(vmcs12_pool, vmcs12_pa))
+		return 0;
+
+	vmcs12 = kzalloc(sizeof(struct vmcs12_list), GFP_KERNEL);
+	if (!vmcs12)
+		return -ENOMEM;
+
+	vmcs12->vmcs12_pa = vmcs12_pa;
+	list_add(&vmcs12->node, vmcs12_pool);
+
+	return 0;
+}
+
+static void clear_vmcs12_launched(struct list_head *vmcs12_pool,
+				  unsigned long vmcs12_pa)
+{
+	struct vmcs12_list *iter;
+	struct list_head *pos;
+
+	list_for_each(pos, vmcs12_pool) {
+		iter = list_entry(pos, struct vmcs12_list, node);
+		if (vmcs12_pa == iter->vmcs12_pa) {
+			list_del(&iter->node);
+			kfree(iter);
+			break;
+		}
+	}
+}
+
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
@@ -5116,6 +5174,18 @@ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
 }
 
 /*
+ * Free the vmcs12 list.
+ */
+static void nested_free_vmcs12_list(struct vcpu_vmx *vmx)
+{
+	struct vmcs12_list *item, *n;
+	list_for_each_entry_safe(item, n, &vmx->nested.vmcs12_pool, node) {
+		list_del(&item->node);
+		kfree(item);
+	}
+}
+
+/*
  * Emulate the VMXON instruction.
  * Currently, we just remember that VMX is active, and do not save or even
  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
@@ -5212,6 +5282,7 @@ static void free_nested(struct vcpu_vmx *vmx)
 	}
 
 	nested_free_all_saved_vmcss(vmx);
+	nested_free_vmcs12_list(vmx);
 }
 
 /* Emulate the VMXOFF instruction */
@@ -5364,7 +5435,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 	vmcs12 = kmap(page);
-	vmcs12->launch_state = 0;
+	clear_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
 	kunmap(page);
 	nested_release_page(page);
 
@@ -6460,6 +6531,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->nested.current_vmptr = -1ull;
 	vmx->nested.current_vmcs12 = NULL;
+	INIT_LIST_HEAD(&vmx->nested.vmcs12_pool);
 
 	return &vmx->vcpu;
 
@@ -6839,6 +6911,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu;
 	struct loaded_vmcs *vmcs02;
+	int is_launched;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -6857,7 +6930,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * for misconfigurations which will anyway be caught by the processor
 	 * when using the merged vmcs02.
 	 */
-	if (vmcs12->launch_state == launch) {
+	is_launched =
+		vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
+	if (is_launched == launch) {
 		nested_vmx_failValid(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
@@ -6946,7 +7021,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	vcpu->cpu = cpu;
 	put_cpu();
 
-	vmcs12->launch_state = 1;
+	if (set_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12)) < 0)
+		return -ENOMEM;
 
 	prepare_vmcs02(vcpu);
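
[Editor's illustration, not part of the patch] For readers following the logic
outside the kernel tree, the stand-alone sketch below mirrors the lifecycle the
patch implements: a successful VMLAUNCH records the vmcs12 guest-physical
address in a pool, VMCLEAR removes it, and VMLAUNCH/VMRESUME first consult the
pool instead of a launch_state field inside the VMCS region. It uses a
hand-rolled singly-linked list rather than <linux/list.h>, and all names and
the sample address are hypothetical, chosen only to echo the patch's helpers.

    /* Illustration only: track which vmcs12 physical addresses are "launched",
     * mirroring vmcs12_launched()/set_vmcs12_launched()/clear_vmcs12_launched().
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct launched_node {
    	unsigned long vmcs12_pa;
    	struct launched_node *next;
    };

    static int is_launched(const struct launched_node *pool, unsigned long pa)
    {
    	for (; pool; pool = pool->next)
    		if (pool->vmcs12_pa == pa)
    			return 1;
    	return 0;
    }

    static int set_launched(struct launched_node **pool, unsigned long pa)
    {
    	struct launched_node *n;

    	if (is_launched(*pool, pa))
    		return 0;		/* already recorded, nothing to do */
    	n = calloc(1, sizeof(*n));
    	if (!n)
    		return -1;		/* the kernel code returns -ENOMEM here */
    	n->vmcs12_pa = pa;
    	n->next = *pool;
    	*pool = n;
    	return 0;
    }

    static void clear_launched(struct launched_node **pool, unsigned long pa)
    {
    	for (; *pool; pool = &(*pool)->next) {
    		if ((*pool)->vmcs12_pa == pa) {
    			struct launched_node *victim = *pool;
    			*pool = victim->next;
    			free(victim);
    			return;
    		}
    	}
    }

    int main(void)
    {
    	struct launched_node *pool = NULL;
    	unsigned long pa = 0x12345000;	/* hypothetical vmcs12 address */

    	set_launched(&pool, pa);			/* after a successful VMLAUNCH */
    	printf("launched: %d\n", is_launched(pool, pa));	/* prints 1 */
    	clear_launched(&pool, pa);			/* VMCLEAR */
    	printf("launched: %d\n", is_launched(pool, pa));	/* prints 0 */
    	return 0;
    }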