Message ID | 20170217063936.13208-15-haozhong.zhang@intel.com (mailing list archive) |
---|---|
State | New, archived |
>>> On 17.02.17 at 07:39, <haozhong.zhang@intel.com> wrote:
> --- a/xen/arch/x86/cpu/mcheck/mce_intel.c
> +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
> @@ -916,3 +916,7 @@ int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
>      return 1;
>  }
>  
> +bool vmce_support_lmce(const struct vcpu *v)
> +{
> +    return !!(v->arch.vmce.mcg_cap & MCG_LMCE_P);

No need for !! here.

> @@ -2634,6 +2635,9 @@ static int is_last_branch_msr(u32 ecx)
>  
>  static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>  {
> +    struct vcpu *v = current;

curr please.

> +    struct domain *d = v->domain;

No need for this local variable afaics.

> @@ -2651,6 +2655,12 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>          __vmread(GUEST_IA32_DEBUGCTL, msr_content);
>          break;
>      case MSR_IA32_FEATURE_CONTROL:
> +        *msr_content = IA32_FEATURE_CONTROL_LOCK;
> +        if ( vmce_support_lmce(v) )
> +            *msr_content |= IA32_FEATURE_CONTROL_LMCE_ON;
> +        if ( nestedhvm_enabled(d) && d->arch.cpuid->basic.vmx )

Doesn't the right side false imply the left side to be false?

Jan
On 02/22/17 08:20 -0700, Jan Beulich wrote:
> >>> On 17.02.17 at 07:39, <haozhong.zhang@intel.com> wrote:
> > --- a/xen/arch/x86/cpu/mcheck/mce_intel.c
> > +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
> > @@ -916,3 +916,7 @@ int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
> >      return 1;
> >  }
> >  
> > +bool vmce_support_lmce(const struct vcpu *v)
> > +{
> > +    return !!(v->arch.vmce.mcg_cap & MCG_LMCE_P);
> 
> No need for !! here.

will remove

> > @@ -2634,6 +2635,9 @@ static int is_last_branch_msr(u32 ecx)
> >  
> > static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
> > {
> > +    struct vcpu *v = current;
> 
> curr please.

will change

> > +    struct domain *d = v->domain;
> 
> No need for this local variable afaics.

will remove

> > @@ -2651,6 +2655,12 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
> >          __vmread(GUEST_IA32_DEBUGCTL, msr_content);
> >          break;
> >      case MSR_IA32_FEATURE_CONTROL:
> > +        *msr_content = IA32_FEATURE_CONTROL_LOCK;
> > +        if ( vmce_support_lmce(v) )
> > +            *msr_content |= IA32_FEATURE_CONTROL_LMCE_ON;
> > +        if ( nestedhvm_enabled(d) && d->arch.cpuid->basic.vmx )
> 
> Doesn't the right side false imply the left side to be false?

Yes, I'll remove the right side.

Thanks,
Haozhong
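Folding the review comments above into the quoted hunks, the follow-up version would presumably look roughly like the sketch below. This is a reconstruction from the replies, not the actually posted revision: only the touched lines are shown, the surrounding Xen code is assumed unchanged, and variable naming follows Jan's request.

bool vmce_support_lmce(const struct vcpu *v)
{
    /* The return type is bool, so the explicit !! is unnecessary. */
    return v->arch.vmce.mcg_cap & MCG_LMCE_P;
}

static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
    struct vcpu *curr = current;   /* renamed from 'v'; the 'd' local is dropped */
    ...
    case MSR_IA32_FEATURE_CONTROL:
        /* LOCK is always reported as set to the guest. */
        *msr_content = IA32_FEATURE_CONTROL_LOCK;
        if ( vmce_support_lmce(curr) )
            *msr_content |= IA32_FEATURE_CONTROL_LMCE_ON;
        /*
         * The cpuid->basic.vmx check is dropped: per the review,
         * nestedhvm_enabled() being true already implies it.
         */
        if ( nestedhvm_enabled(curr->domain) )
            *msr_content |= IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX;
        break;
    ...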
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index b4cc41a..81507d3 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -916,3 +916,7 @@ int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
     return 1;
 }
 
+bool vmce_support_lmce(const struct vcpu *v)
+{
+    return !!(v->arch.vmce.mcg_cap & MCG_LMCE_P);
+}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 42f4fbd..6947af0 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -56,6 +56,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/altp2m.h>
 #include <asm/event.h>
+#include <asm/mce.h>
 #include <asm/monitor.h>
 #include <public/arch-x86/cpuid.h>
@@ -2634,6 +2635,9 @@ static int is_last_branch_msr(u32 ecx)
 
 static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 {
+    struct vcpu *v = current;
+    struct domain *d = v->domain;
+
     HVM_DBG_LOG(DBG_LEVEL_MSR, "ecx=%#x", msr);
 
     switch ( msr )
@@ -2651,6 +2655,12 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         __vmread(GUEST_IA32_DEBUGCTL, msr_content);
         break;
     case MSR_IA32_FEATURE_CONTROL:
+        *msr_content = IA32_FEATURE_CONTROL_LOCK;
+        if ( vmce_support_lmce(v) )
+            *msr_content |= IA32_FEATURE_CONTROL_LMCE_ON;
+        if ( nestedhvm_enabled(d) && d->arch.cpuid->basic.vmx )
+            *msr_content |= IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX;
+        break;
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
         if ( !nvmx_msr_read_intercept(msr, msr_content) )
             goto gp_fault;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index ec3b946..0060723 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2020,10 +2020,6 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
         data = gen_vmx_msr(data, VMX_ENTRY_CTLS_DEFAULT1, host_data);
         break;
-    case MSR_IA32_FEATURE_CONTROL:
-        data = IA32_FEATURE_CONTROL_LOCK |
-               IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX;
-        break;
     case MSR_IA32_VMX_VMCS_ENUM:
         /* The max index of VVMCS encoding is 0x1f. */
         data = 0x1f << 1;
diff --git a/xen/include/asm-x86/mce.h b/xen/include/asm-x86/mce.h
index 549bef3..6b827ef 100644
--- a/xen/include/asm-x86/mce.h
+++ b/xen/include/asm-x86/mce.h
@@ -36,6 +36,7 @@ extern void vmce_init_vcpu(struct vcpu *);
 extern int vmce_restore_vcpu(struct vcpu *, const struct hvm_vmce_vcpu *);
 extern int vmce_wrmsr(uint32_t msr, uint64_t val);
 extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
+extern bool vmce_support_lmce(const struct vcpu *v);
 
 extern unsigned int nr_mce_banks;
If MCG_LMCE_P is present in the guest MSR_IA32_MCG_CAP, then set the LMCE
and LOCK bits in the guest MSR_IA32_FEATURE_CONTROL. The Intel SDM requires
those bits to be set before software can enable LMCE.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
Cc: Christoph Egger <chegger@amazon.de>
Cc: Liu Jinsong <jinsong.liu@alibaba-inc.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/cpu/mcheck/mce_intel.c |  4 ++++
 xen/arch/x86/hvm/vmx/vmx.c          | 10 ++++++++++
 xen/arch/x86/hvm/vmx/vvmx.c         |  4 ----
 xen/include/asm-x86/mce.h           |  1 +
 4 files changed, 15 insertions(+), 4 deletions(-)
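For context on why the SDM ordering matters: a guest OS is expected to verify that the "LMCE On" and "Lock" bits are already set in IA32_FEATURE_CONTROL before it sets LMCE_EN in IA32_MCG_EXT_CTL, which is exactly the state this patch makes the read intercept report. The guest-side sketch below illustrates that sequence; the rdmsr()/wrmsr() helpers and the locally spelled-out MSR/bit constants are for illustration only and are not part of this patch.

#include <stdbool.h>
#include <stdint.h>

/* MSR indices and bit positions per the Intel SDM (local definitions). */
#define MSR_IA32_FEATURE_CONTROL   0x0000003a
#define   FEATURE_CONTROL_LOCK     (1ULL << 0)
#define   FEATURE_CONTROL_LMCE_ON  (1ULL << 20)
#define MSR_IA32_MCG_CAP           0x00000179
#define   MCG_CAP_LMCE_P           (1ULL << 27)
#define MSR_IA32_MCG_EXT_CTL       0x000004d0
#define   MCG_EXT_CTL_LMCE_EN      (1ULL << 0)

/* Hypothetical accessors; a real kernel supplies its own rdmsr/wrmsr. */
extern uint64_t rdmsr(uint32_t msr);
extern void wrmsr(uint32_t msr, uint64_t val);

/* Enable local machine-check (LMCE) delivery if the platform permits it. */
static bool try_enable_lmce(void)
{
    /* LMCE must be advertised in MCG_CAP at all. */
    if ( !(rdmsr(MSR_IA32_MCG_CAP) & MCG_CAP_LMCE_P) )
        return false;

    /*
     * IA32_FEATURE_CONTROL must be locked with "LMCE On" set before
     * software may set MCG_EXT_CTL.LMCE_EN.
     */
    uint64_t fc = rdmsr(MSR_IA32_FEATURE_CONTROL);
    if ( (fc & (FEATURE_CONTROL_LOCK | FEATURE_CONTROL_LMCE_ON)) !=
         (FEATURE_CONTROL_LOCK | FEATURE_CONTROL_LMCE_ON) )
        return false;

    wrmsr(MSR_IA32_MCG_EXT_CTL,
          rdmsr(MSR_IA32_MCG_EXT_CTL) | MCG_EXT_CTL_LMCE_EN);
    return true;
}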