From patchwork Thu Nov 22 04:51:57 2012 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xu, Dongxiao" X-Patchwork-Id: 1784161 Return-Path: X-Original-To: patchwork-kvm@patchwork.kernel.org Delivered-To: patchwork-process-083081@patchwork2.kernel.org Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by patchwork2.kernel.org (Postfix) with ESMTP id D96C9DF24C for ; Thu, 22 Nov 2012 18:27:39 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751915Ab2KVS11 (ORCPT ); Thu, 22 Nov 2012 13:27:27 -0500 Received: from mga09.intel.com ([134.134.136.24]:21290 "EHLO mga09.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751256Ab2KVS0z (ORCPT ); Thu, 22 Nov 2012 13:26:55 -0500 Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga102.jf.intel.com with ESMTP; 21 Nov 2012 21:00:33 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="4.83,298,1352102400"; d="scan'208";a="222930823" Received: from unknown (HELO localhost) ([10.239.36.11]) by orsmga001.jf.intel.com with ESMTP; 21 Nov 2012 21:01:13 -0800 From: Dongxiao Xu To: kvm@vger.kernel.org Cc: mtosatti@redhat.com, gleb@redhat.com Subject: [PATCH v2 2/4] nested vmx: clean up for nested_cpu_has_xxx functions Date: Thu, 22 Nov 2012 12:51:57 +0800 Message-Id: <1353559919-29439-3-git-send-email-dongxiao.xu@intel.com> X-Mailer: git-send-email 1.7.1 In-Reply-To: <1353559919-29439-1-git-send-email-dongxiao.xu@intel.com> References: <1353559919-29439-1-git-send-email-dongxiao.xu@intel.com> Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org This is a preparation for a later change, which uses vmcs12_read() and vmcs12_write() to replace the current way of accessing vmcs12 fields. Since the above functions take 'vcpu' as a parameter, we also use 'vcpu' as the parameter in the nested_cpu_has_xxx functions. 
Signed-off-by: Dongxiao Xu --- arch/x86/kvm/vmx.c | 57 +++++++++++++++++++++++++-------------------------- 1 files changed, 28 insertions(+), 29 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2f8344f..639cad0 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -889,22 +889,22 @@ static inline bool report_flexpriority(void) return flexpriority_enabled; } -static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) +static inline bool nested_cpu_has(struct kvm_vcpu *vcpu, u32 bit) { - return vmcs12->cpu_based_vm_exec_control & bit; + return get_vmcs12(vcpu)->cpu_based_vm_exec_control & bit; } -static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) +static inline bool nested_cpu_has2(struct kvm_vcpu *vcpu, u32 bit) { - return (vmcs12->cpu_based_vm_exec_control & + return (get_vmcs12(vcpu)->cpu_based_vm_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && - (vmcs12->secondary_vm_exec_control & bit); + (get_vmcs12(vcpu)->secondary_vm_exec_control & bit); } -static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12, - struct kvm_vcpu *vcpu) +static inline bool nested_cpu_has_virtual_nmis(struct kvm_vcpu *vcpu) { - return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; + return get_vmcs12(vcpu)->pin_based_vm_exec_control & + PIN_BASED_VIRTUAL_NMIS; } static inline bool is_exception(u32 intr_info) @@ -1888,7 +1888,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) /* recalculate vmcs02.TSC_OFFSET: */ vmcs12 = get_vmcs12(vcpu); vmcs_write64(TSC_OFFSET, offset + - (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? + (nested_cpu_has(vcpu, CPU_BASED_USE_TSC_OFFSETING) ? 
vmcs12->tsc_offset : 0)); } else { vmcs_write64(TSC_OFFSET, offset); @@ -5712,7 +5712,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; gpa_t bitmap; - if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS)) + if (!nested_cpu_has(vcpu, CPU_BASED_USE_MSR_BITMAPS)) return 1; /* @@ -5768,7 +5768,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, (vmcs12->cr3_target_count >= 4 && vmcs12->cr3_target_value3 == val)) return 0; - if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) + if (nested_cpu_has(vcpu, CPU_BASED_CR3_LOAD_EXITING)) return 1; break; case 4: @@ -5777,7 +5777,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, return 1; break; case 8: - if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) + if (nested_cpu_has(vcpu, CPU_BASED_CR8_LOAD_EXITING)) return 1; break; } @@ -5865,15 +5865,15 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) case EXIT_REASON_CPUID: return 1; case EXIT_REASON_HLT: - return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); + return nested_cpu_has(vcpu, CPU_BASED_HLT_EXITING); case EXIT_REASON_INVD: return 1; case EXIT_REASON_INVLPG: - return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); + return nested_cpu_has(vcpu, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_RDPMC: - return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); + return nested_cpu_has(vcpu, CPU_BASED_RDPMC_EXITING); case EXIT_REASON_RDTSC: - return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); + return nested_cpu_has(vcpu, CPU_BASED_RDTSC_EXITING); case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: @@ -5887,7 +5887,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) case EXIT_REASON_CR_ACCESS: return nested_vmx_exit_handled_cr(vcpu, vmcs12); case EXIT_REASON_DR_ACCESS: - return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); + 
return nested_cpu_has(vcpu, CPU_BASED_MOV_DR_EXITING); case EXIT_REASON_IO_INSTRUCTION: /* TODO: support IO bitmaps */ return 1; @@ -5897,25 +5897,26 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) case EXIT_REASON_INVALID_STATE: return 1; case EXIT_REASON_MWAIT_INSTRUCTION: - return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); + return nested_cpu_has(vcpu, CPU_BASED_MWAIT_EXITING); case EXIT_REASON_MONITOR_INSTRUCTION: - return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); + return nested_cpu_has(vcpu, CPU_BASED_MONITOR_EXITING); case EXIT_REASON_PAUSE_INSTRUCTION: - return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || - nested_cpu_has2(vmcs12, - SECONDARY_EXEC_PAUSE_LOOP_EXITING); + return nested_cpu_has(vcpu, CPU_BASED_PAUSE_EXITING) || + nested_cpu_has2(vcpu, + SECONDARY_EXEC_PAUSE_LOOP_EXITING); case EXIT_REASON_MCE_DURING_VMENTRY: return 0; case EXIT_REASON_TPR_BELOW_THRESHOLD: return 1; case EXIT_REASON_APIC_ACCESS: - return nested_cpu_has2(vmcs12, + return nested_cpu_has2(vcpu, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); case EXIT_REASON_EPT_VIOLATION: case EXIT_REASON_EPT_MISCONFIG: return 0; case EXIT_REASON_WBINVD: - return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); + return nested_cpu_has2(vcpu, + SECONDARY_EXEC_WBINVD_EXITING); case EXIT_REASON_XSETBV: return 1; default: @@ -5985,8 +5986,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) __func__, vectoring_info, exit_reason); if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && - !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( - get_vmcs12(vcpu), vcpu)))) { + !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(vcpu)))) { if (vmx_interrupt_allowed(vcpu)) { vmx->soft_vnmi_blocked = 0; } else if (vmx->vnmi_blocked_time > 1000000000LL && @@ -6679,8 +6679,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) exec_control &= ~SECONDARY_EXEC_RDTSCP; /* Take the following fields only from vmcs12 */ exec_control &= 
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - if (nested_cpu_has(vmcs12, - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) + if (nested_cpu_has(vcpu, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) exec_control |= vmcs12->secondary_vm_exec_control; if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) { @@ -6855,7 +6854,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) return 1; } - if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && + if (nested_cpu_has2(vcpu, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) { /*TODO: Also verify bits beyond physical address width are 0*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);