@@ -219,22 +219,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
/* Allow Read/Write PMU Counters MSR Directly. */
for ( i = 0; i < fixed_pmc_cnt; i++ )
- vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
for ( i = 0; i < arch_pmc_cnt; i++ )
{
- vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
if ( full_width_write )
- vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
}
/* Allow Read PMU Non-global Controls Directly. */
for ( i = 0; i < arch_pmc_cnt; i++ )
- vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), VMX_MSR_R);
+ vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
- vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
- vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+ vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+ vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
}
static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -242,21 +242,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
unsigned int i;
for ( i = 0; i < fixed_pmc_cnt; i++ )
- vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
for ( i = 0; i < arch_pmc_cnt; i++ )
{
- vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
if ( full_width_write )
- vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
}
for ( i = 0; i < arch_pmc_cnt; i++ )
- vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), VMX_MSR_R);
+ vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
- vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
- vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+ vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+ vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
}
static inline void __core2_vpmu_save(struct vcpu *v)
@@ -892,7 +892,7 @@ static void vmx_set_host_env(struct vcpu *v)
}
void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
- enum vmx_msr_intercept_type type)
+ unsigned int type)
{
struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
struct domain *d = v->domain;
@@ -906,17 +906,17 @@ void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
if ( msr <= 0x1fff )
{
- if ( type & VMX_MSR_R )
+ if ( type & MSR_R )
clear_bit(msr, msr_bitmap->read_low);
- if ( type & VMX_MSR_W )
+ if ( type & MSR_W )
clear_bit(msr, msr_bitmap->write_low);
}
else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
{
msr &= 0x1fff;
- if ( type & VMX_MSR_R )
+ if ( type & MSR_R )
clear_bit(msr, msr_bitmap->read_high);
- if ( type & VMX_MSR_W )
+ if ( type & MSR_W )
clear_bit(msr, msr_bitmap->write_high);
}
else
@@ -924,7 +924,7 @@ void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
}
void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
- enum vmx_msr_intercept_type type)
+ unsigned int type)
{
struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
@@ -934,17 +934,17 @@ void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
if ( msr <= 0x1fff )
{
- if ( type & VMX_MSR_R )
+ if ( type & MSR_R )
set_bit(msr, msr_bitmap->read_low);
- if ( type & VMX_MSR_W )
+ if ( type & MSR_W )
set_bit(msr, msr_bitmap->write_low);
}
else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
{
msr &= 0x1fff;
- if ( type & VMX_MSR_R )
+ if ( type & MSR_R )
set_bit(msr, msr_bitmap->read_high);
- if ( type & VMX_MSR_W )
+ if ( type & MSR_W )
set_bit(msr, msr_bitmap->write_high);
}
else
@@ -1151,17 +1151,17 @@ static int construct_vmcs(struct vcpu *v)
v->arch.hvm.vmx.msr_bitmap = msr_bitmap;
__vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
- vmx_clear_msr_intercept(v, MSR_FS_BASE, VMX_MSR_RW);
- vmx_clear_msr_intercept(v, MSR_GS_BASE, VMX_MSR_RW);
- vmx_clear_msr_intercept(v, MSR_SHADOW_GS_BASE, VMX_MSR_RW);
- vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, VMX_MSR_RW);
- vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_ESP, VMX_MSR_RW);
- vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_EIP, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_FS_BASE, MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_GS_BASE, MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_SHADOW_GS_BASE, MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_ESP, MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_EIP, MSR_RW);
if ( paging_mode_hap(d) && (!is_iommu_enabled(d) || iommu_snoop) )
- vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
(vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) )
- vmx_clear_msr_intercept(v, MSR_IA32_BNDCFGS, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_BNDCFGS, MSR_RW);
}
/* I/O access bitmap. */
@@ -791,7 +791,7 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
*/
if ( cp->feat.ibrsb )
{
- vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
rc = vmx_add_guest_msr(v, MSR_SPEC_CTRL, 0);
if ( rc )
@@ -799,7 +799,7 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
}
else
{
- vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
rc = vmx_del_msr(v, MSR_SPEC_CTRL, VMX_MSR_GUEST);
if ( rc && rc != -ESRCH )
@@ -809,20 +809,20 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
/* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
if ( cp->feat.ibrsb || cp->extd.ibpb )
- vmx_clear_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
else
- vmx_set_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
/* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */
if ( cp->feat.l1d_flush )
- vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, MSR_RW);
else
- vmx_set_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_FLUSH_CMD, MSR_RW);
if ( cp->feat.pks )
- vmx_clear_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_PKRS, MSR_RW);
else
- vmx_set_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_PKRS, MSR_RW);
out:
vmx_vmcs_exit(v);
@@ -1418,7 +1418,7 @@ static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
vmx_get_guest_pat(v, pat);
vmx_set_guest_pat(v, uc_pat);
- vmx_set_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
wbinvd(); /* flush possibly polluted cache */
hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
@@ -1429,7 +1429,7 @@ static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
vmx_set_guest_pat(v, *pat);
if ( !is_iommu_enabled(v->domain) || iommu_snoop )
- vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
hvm_asid_flush_vcpu(v); /* no need to flush cache */
}
}
@@ -1883,9 +1883,9 @@ static void cf_check vmx_update_guest_efer(struct vcpu *v)
* into hardware, clear the read intercept to avoid unnecessary VMExits.
*/
if ( guest_efer == v->arch.hvm.guest_efer )
- vmx_clear_msr_intercept(v, MSR_EFER, VMX_MSR_R);
+ vmx_clear_msr_intercept(v, MSR_EFER, MSR_R);
else
- vmx_set_msr_intercept(v, MSR_EFER, VMX_MSR_R);
+ vmx_set_msr_intercept(v, MSR_EFER, MSR_R);
}
static void nvmx_enqueue_n2_exceptions(struct vcpu *v,
@@ -2312,7 +2312,7 @@ static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
struct vcpu *v;
for_each_vcpu ( d, v )
- vmx_set_msr_intercept(v, msr, VMX_MSR_W);
+ vmx_set_msr_intercept(v, msr, MSR_W);
}
static void cf_check vmx_vcpu_update_eptp(struct vcpu *v)
@@ -3479,17 +3479,17 @@ void cf_check vmx_vlapic_msr_changed(struct vcpu *v)
{
for ( msr = MSR_X2APIC_FIRST;
msr <= MSR_X2APIC_LAST; msr++ )
- vmx_clear_msr_intercept(v, msr, VMX_MSR_R);
+ vmx_clear_msr_intercept(v, msr, MSR_R);
- vmx_set_msr_intercept(v, MSR_X2APIC_PPR, VMX_MSR_R);
- vmx_set_msr_intercept(v, MSR_X2APIC_TMICT, VMX_MSR_R);
- vmx_set_msr_intercept(v, MSR_X2APIC_TMCCT, VMX_MSR_R);
+ vmx_set_msr_intercept(v, MSR_X2APIC_PPR, MSR_R);
+ vmx_set_msr_intercept(v, MSR_X2APIC_TMICT, MSR_R);
+ vmx_set_msr_intercept(v, MSR_X2APIC_TMCCT, MSR_R);
}
if ( cpu_has_vmx_virtual_intr_delivery )
{
- vmx_clear_msr_intercept(v, MSR_X2APIC_TPR, VMX_MSR_W);
- vmx_clear_msr_intercept(v, MSR_X2APIC_EOI, VMX_MSR_W);
- vmx_clear_msr_intercept(v, MSR_X2APIC_SELF, VMX_MSR_W);
+ vmx_clear_msr_intercept(v, MSR_X2APIC_TPR, MSR_W);
+ vmx_clear_msr_intercept(v, MSR_X2APIC_EOI, MSR_W);
+ vmx_clear_msr_intercept(v, MSR_X2APIC_SELF, MSR_W);
}
}
else
@@ -3500,7 +3500,7 @@ void cf_check vmx_vlapic_msr_changed(struct vcpu *v)
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) )
for ( msr = MSR_X2APIC_FIRST;
msr <= MSR_X2APIC_LAST; msr++ )
- vmx_set_msr_intercept(v, msr, VMX_MSR_RW);
+ vmx_set_msr_intercept(v, msr, MSR_RW);
vmx_update_secondary_exec_control(v);
vmx_vmcs_exit(v);
@@ -3636,7 +3636,7 @@ static int cf_check vmx_msr_write_intercept(
return X86EMUL_OKAY;
}
- vmx_clear_msr_intercept(v, lbr->base + i, VMX_MSR_RW);
+ vmx_clear_msr_intercept(v, lbr->base + i, MSR_RW);
}
}
@@ -633,18 +633,10 @@ static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
return 0;
}
-
-/* MSR intercept bitmap infrastructure. */
-enum vmx_msr_intercept_type {
- VMX_MSR_R = 1,
- VMX_MSR_W = 2,
- VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
-};
-
void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
- enum vmx_msr_intercept_type type);
+ unsigned int type);
void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
- enum vmx_msr_intercept_type type);
+ unsigned int type);
void vmx_vmcs_switch(paddr_t from, paddr_t to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
Replace enum vmx_msr_intercept_type with the MSR access flags, defined
in hvm.h, so that the functions {svm,vmx}_{set,clear}_msr_intercept()
share the same prototype.

No functional change intended.

Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
---
Changes in v2:
  - change the type of the 'type' parameter from int to unsigned int,
    as reported by Jan

 xen/arch/x86/cpu/vpmu_intel.c           | 24 +++++++-------
 xen/arch/x86/hvm/vmx/vmcs.c             | 36 ++++++++++----------
 xen/arch/x86/hvm/vmx/vmx.c              | 44 ++++++++++++-------------
 xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 12 ++-----
 4 files changed, 54 insertions(+), 62 deletions(-)
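
For reference, a minimal sketch of the access flags this patch expects
hvm.h to provide. The names come from the commit message; the values
are an assumption, chosen to match the removed VMX_MSR_R/VMX_MSR_W
enumerators (1 and 2), which is what keeps the substitution free of
functional change:

    /* MSR access flags (assumed hvm.h definitions, not quoted from the tree). */
    #define MSR_R   (1U << 0)        /* read access */
    #define MSR_W   (1U << 1)        /* write access */
    #define MSR_RW  (MSR_R | MSR_W)  /* read and write access */

With the enum gone, the VMX helpers take a plain unsigned int flags
parameter, giving them the same shape as their SVM counterparts (the
SVM side is handled elsewhere in the series):

    void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
                               unsigned int type);
    void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
                                 unsigned int type);

    /* Example call site, as in construct_vmcs(): */
    vmx_clear_msr_intercept(v, MSR_FS_BASE, MSR_RW);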