@@ -5197,7 +5197,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
{
case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON:
case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF:
- if ( !cpu_has_monitor_trap_flag )
+ if ( !using_vmx || !cpu_has_monitor_trap_flag )
return -EOPNOTSUPP;
break;
default:
@@ -196,7 +196,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
res->a = CPUID4A_RELAX_TIMER_INT;
if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush )
res->a |= CPUID4A_HCALL_REMOTE_TLB_FLUSH;
- if ( !cpu_has_vmx_apic_reg_virt )
+ if ( !using_vmx || !cpu_has_vmx_apic_reg_virt )
res->a |= CPUID4A_MSR_BASED_APIC;
if ( viridian_feature_mask(d) & HVMPV_hcall_ipi )
res->a |= CPUID4A_SYNTHETIC_CLUSTER_IPI;
@@ -236,7 +236,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
case 6:
/* Detected and in use hardware features. */
- if ( cpu_has_vmx_virtualize_apic_accesses )
+ if ( using_vmx && cpu_has_vmx_virtualize_apic_accesses )
res->a |= CPUID6A_APIC_OVERLAY;
if ( cpu_has_vmx_msr_bitmap || (read_efer() & EFER_SVME) )
res->a |= CPUID6A_MSR_BITMAPS;
@@ -306,7 +306,7 @@ extern u64 vmx_ept_vpid_cap;
#define cpu_has_vmx_vnmi \
(vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS)
#define cpu_has_vmx_msr_bitmap \
- (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
+ (using_vmx && vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
#define cpu_has_vmx_secondary_exec_control \
(vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
#define cpu_has_vmx_tertiary_exec_control \
@@ -316,7 +316,7 @@ extern u64 vmx_ept_vpid_cap;
#define cpu_has_vmx_dt_exiting \
(vmx_secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING)
#define cpu_has_vmx_rdtscp \
- (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_RDTSCP)
+ (using_vmx && vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_RDTSCP)
#define cpu_has_vmx_vpid \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
#define cpu_has_monitor_trap_flag \
@@ -333,7 +333,7 @@ extern u64 vmx_ept_vpid_cap;
#define cpu_has_vmx_ple \
(vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define cpu_has_vmx_invpcid \
- (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_INVPCID)
+ (using_vmx && vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_INVPCID)
#define cpu_has_vmx_apic_reg_virt \
(vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT)
#define cpu_has_vmx_virtual_intr_delivery \
@@ -347,14 +347,14 @@ extern u64 vmx_ept_vpid_cap;
#define cpu_has_vmx_vmfunc \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VM_FUNCTIONS)
#define cpu_has_vmx_virt_exceptions \
- (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
+ (using_vmx && vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
#define cpu_has_vmx_pml \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
#define cpu_has_vmx_mpx \
- ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
+ (using_vmx && (vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
(vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
#define cpu_has_vmx_xsaves \
- (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
+ (using_vmx && vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
#define cpu_has_vmx_tsc_scaling \
(vmx_secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
#define cpu_has_vmx_bus_lock_detection \
@@ -1130,7 +1130,7 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
if ( !is_hvm_domain(d) || subleaf != 0 )
break;
- if ( cpu_has_vmx_apic_reg_virt )
+ if ( using_vmx && cpu_has_vmx_apic_reg_virt )
res->a |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
/*
@@ -1139,7 +1139,8 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
* and wrmsr in the guest will run without VMEXITs (see
* vmx_vlapic_msr_changed()).
*/
- if ( cpu_has_vmx_virtualize_x2apic_mode &&
+ if ( using_vmx &&
+ cpu_has_vmx_virtualize_x2apic_mode &&
cpu_has_vmx_apic_reg_virt &&
cpu_has_vmx_virtual_intr_delivery )
res->a |= XEN_HVM_CPUID_X2APIC_VIRT;
There are several places in common code, outside of arch/x86/hvm/vmx, where cpu_has_vmx_* are accessed without first checking whether VMX is supported. These macros rely on global variables defined in VMX code, so when VMX support is disabled, accesses to these variables turn into build failures. To overcome these failures, a build-time check is done before accessing the global variables, so that DCE removes the references to them. Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com> CC: Andrew Cooper <andrew.cooper3@citrix.com> CC: Jan Beulich <jbeulich@suse.com> --- changes in v3: - use using_vmx instead of cpu_has_vmx - clarify description of why this change is needed changes in v2: - do not touch SVM code and macros - drop vmx_ctrl_has_feature() - guard cpu_has_vmx_* macros in common code instead changes in v1: - introduced helper routine vmx_ctrl_has_feature() and used it for all cpu_has_vmx_* macros --- xen/arch/x86/hvm/hvm.c | 2 +- xen/arch/x86/hvm/viridian/viridian.c | 4 ++-- xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 12 ++++++------ xen/arch/x86/traps.c | 5 +++-- 4 files changed, 12 insertions(+), 11 deletions(-)