From patchwork Fri Aug 21 04:50:10 2015
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Xiao Guangrong
X-Patchwork-Id: 7048041
From: Xiao Guangrong
To: pbonzini@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org, Xiao Guangrong
Subject: [PATCH 8/9] KVM: VMX: introduce set_clear_2nd_exec_ctrl()
Date: Fri, 21 Aug 2015 12:50:10 +0800
Message-Id: <1440132611-26052-9-git-send-email-guangrong.xiao@linux.intel.com>
X-Mailer: git-send-email 2.4.3
In-Reply-To: <1440132611-26052-1-git-send-email-guangrong.xiao@linux.intel.com>
References: <1440132611-26052-1-git-send-email-guangrong.xiao@linux.intel.com>
X-Mailing-List: kvm@vger.kernel.org

Introduce set_clear_2nd_exec_ctrl() to set or clear bits in
SECONDARY_VM_EXEC_CONTROL. It is used to clean up the open-coded
read-modify-write sequences in vmx.c.

Signed-off-by: Xiao Guangrong
---
 arch/x86/kvm/vmx.c | 42 +++++++++++++++++++-----------------------
 1 file changed, 19 insertions(+), 23 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4f238b7..58f7b89 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4589,6 +4589,18 @@ static void ept_set_mmio_spte_mask(void)
 	kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
 }
 
+static void set_clear_2nd_exec_ctrl(u32 ctrls, bool set)
+{
+	u32 exec_ctrl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+	if (set)
+		exec_ctrl |= ctrls;
+	else
+		exec_ctrl &= ~ctrls;
+
+	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_ctrl);
+}
+
 #define VMX_XSS_EXIT_BITMAP 0
 /*
  * Sets up the vmcs for emulated real mode.
@@ -6632,7 +6644,6 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 
 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
 {
-	u32 exec_control;
 
 	if (vmx->nested.current_vmptr == -1ull)
 		return;
@@ -6645,9 +6656,7 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
 		   they were modified */
 		copy_shadow_to_vmcs12(vmx);
 		vmx->nested.sync_shadow_vmcs = false;
-		exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-		exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
-		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+		set_clear_2nd_exec_ctrl(SECONDARY_EXEC_SHADOW_VMCS, false);
 		vmcs_write64(VMCS_LINK_POINTER, -1ull);
 	}
 	vmx->nested.posted_intr_nv = -1;
@@ -7043,7 +7052,6 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	gpa_t vmptr;
-	u32 exec_control;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -7075,9 +7083,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		vmx->nested.current_vmcs12 = new_vmcs12;
 		vmx->nested.current_vmcs12_page = page;
 		if (enable_shadow_vmcs) {
-			exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-			exec_control |= SECONDARY_EXEC_SHADOW_VMCS;
-			vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+			set_clear_2nd_exec_ctrl(SECONDARY_EXEC_SHADOW_VMCS,
+						true);
 			vmcs_write64(VMCS_LINK_POINTER,
 				     __pa(vmx->nested.current_shadow_vmcs));
 			vmx->nested.sync_shadow_vmcs = true;
@@ -7587,7 +7594,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 static int vmx_enable_pml(struct vcpu_vmx *vmx)
 {
 	struct page *pml_pg;
-	u32 exec_control;
 
 	pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!pml_pg)
@@ -7598,24 +7604,18 @@ static int vmx_enable_pml(struct vcpu_vmx *vmx)
 	vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
 	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 
-	exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-	exec_control |= SECONDARY_EXEC_ENABLE_PML;
-	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+	set_clear_2nd_exec_ctrl(SECONDARY_EXEC_ENABLE_PML, true);
 
 	return 0;
 }
 
 static void vmx_disable_pml(struct vcpu_vmx *vmx)
 {
-	u32 exec_control;
-
 	ASSERT(vmx->pml_pg);
 	__free_page(vmx->pml_pg);
 	vmx->pml_pg = NULL;
 
-	exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-	exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
-	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+	set_clear_2nd_exec_ctrl(SECONDARY_EXEC_ENABLE_PML, false);
 }
 
 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
@@ -8689,12 +8689,8 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		best->ebx &= ~bit(X86_FEATURE_INVPCID);
 	}
 
-	if (clear_exe_ctrl) {
-		u32 exec_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-
-		exec_ctl &= ~clear_exe_ctrl;
-		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_ctl);
-	}
+	if (clear_exe_ctrl)
+		set_clear_2nd_exec_ctrl(clear_exe_ctrl, false);
 
 	if (!guest_cpuid_has_pcommit(vcpu) && nested)
 		vmx->nested.nested_vmx_secondary_ctls_high &=
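
For reference (not part of the patch): a minimal user-space sketch of the
read-modify-write pattern the helper factors out. The real vmcs_read32() /
vmcs_write32() accessors are replaced by a mock 32-bit register, and the two
control-bit values are illustrative assumptions, not the architectural VMX
encodings.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mock stand-in for SECONDARY_VM_EXEC_CONTROL (illustrative only). */
static uint32_t mock_secondary_exec_control;

static uint32_t mock_vmcs_read32(void)
{
	return mock_secondary_exec_control;
}

static void mock_vmcs_write32(uint32_t val)
{
	mock_secondary_exec_control = val;
}

/*
 * Same shape as the helper the patch introduces: read the control word
 * once, set or clear the requested bits, write it back once.
 */
static void set_clear_2nd_exec_ctrl(uint32_t ctrls, bool set)
{
	uint32_t exec_ctrl = mock_vmcs_read32();

	if (set)
		exec_ctrl |= ctrls;
	else
		exec_ctrl &= ~ctrls;

	mock_vmcs_write32(exec_ctrl);
}

int main(void)
{
	/* Illustrative bit positions, not the real control encodings. */
	const uint32_t SHADOW_VMCS = UINT32_C(1) << 14;
	const uint32_t ENABLE_PML  = UINT32_C(1) << 17;

	set_clear_2nd_exec_ctrl(SHADOW_VMCS | ENABLE_PML, true);
	printf("after set:   0x%08" PRIx32 "\n", mock_secondary_exec_control);

	set_clear_2nd_exec_ctrl(SHADOW_VMCS, false);
	printf("after clear: 0x%08" PRIx32 "\n", mock_secondary_exec_control);

	return 0;
}

With these assumed bit values it prints 0x00024000 after the set and
0x00020000 after the clear. Centralizing the read and the write-back is what
lets each call site above shrink to a single line and drop its local
exec_control variable.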