Message ID | 20191021233027.21566-3-aaronlewis@google.com (mailing list archive)
---|---
State | New, archived
Series | Add support for XSAVES to AMD and unify it with Intel
On 22/10/19 01:30, Aaron Lewis wrote:
> Volume 4 of the SDM says that IA32_XSS is supported
> if CPUID(EAX=0DH,ECX=1):EAX.XSS[bit 3] is set, so only the
> X86_FEATURE_XSAVES check is necessary (X86_FEATURE_XSAVES is the Linux
> name for CPUID(EAX=0DH,ECX=1):EAX.XSS[bit 3]).
>
> Fixes: 4d763b168e9c5 ("KVM: VMX: check CPUID before allowing read/write of IA32_XSS")
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
> Change-Id: I9059b9f2e3595e4b09a4cdcf14b933b22ebad419
> ---
>  arch/x86/kvm/vmx/vmx.c | 24 +++++++++++-------------
>  1 file changed, 11 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 34525af44353..a9b070001c3e 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -1821,10 +1821,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                  return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
>                                         &msr_info->data);
>          case MSR_IA32_XSS:
> -                if (!vmx_xsaves_supported() ||
> -                    (!msr_info->host_initiated &&
> -                     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
> -                       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
> +                if (!msr_info->host_initiated &&
> +                    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
>                          return 1;
>                  msr_info->data = vcpu->arch.ia32_xss;
>                  break;
> @@ -2064,10 +2062,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                          return 1;
>                  return vmx_set_vmx_msr(vcpu, msr_index, data);
>          case MSR_IA32_XSS:
> -                if (!vmx_xsaves_supported() ||
> -                    (!msr_info->host_initiated &&
> -                     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
> -                       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
> +                if (!msr_info->host_initiated &&
> +                    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
>                          return 1;
>                  /*
>                   * The only supported bit as of Skylake is bit 8, but
> @@ -2076,11 +2072,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                  if (data != 0)
>                          return 1;
>                  vcpu->arch.ia32_xss = data;
> -                if (vcpu->arch.ia32_xss != host_xss)
> -                        add_atomic_switch_msr(vmx, MSR_IA32_XSS,
> -                                vcpu->arch.ia32_xss, host_xss, false);
> -                else
> -                        clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
> +                if (vcpu->arch.xsaves_enabled) {
> +                        if (vcpu->arch.ia32_xss != host_xss)
> +                                add_atomic_switch_msr(vmx, MSR_IA32_XSS,
> +                                        vcpu->arch.ia32_xss, host_xss, false);
> +                        else
> +                                clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
> +                }
>                  break;
>          case MSR_IA32_RTIT_CTL:
>                  if ((pt_mode != PT_MODE_HOST_GUEST) ||

The last hunk technically doesn't belong in this patch, but okay.

Paolo
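As an aside for readers following along: the CPUID bit the commit message cites can be probed from userspace. A minimal sketch, assuming a GCC/Clang toolchain whose <cpuid.h> provides __get_cpuid_count():

/* Illustrative only: read CPUID(EAX=0xD, ECX=1) and test EAX bit 3,
 * the bit that the Linux X86_FEATURE_XSAVES flag maps to. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Returns 0 if the CPU does not implement leaf 0xD. */
        if (!__get_cpuid_count(0x0d, 1, &eax, &ebx, &ecx, &edx)) {
                puts("CPUID leaf 0xD, sub-leaf 1 not available");
                return 1;
        }

        /* EAX bit 3: XSAVES/XRSTORS and IA32_XSS are supported. */
        printf("XSAVES/IA32_XSS: %s\n", (eax & (1u << 3)) ? "yes" : "no");
        return 0;
}

Since this bit already implies XSAVE support architecturally, the extra X86_FEATURE_XSAVE guest_cpuid_has() check being removed by the patch was redundant.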
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 34525af44353..a9b070001c3e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1821,10 +1821,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
                                        &msr_info->data);
         case MSR_IA32_XSS:
-                if (!vmx_xsaves_supported() ||
-                    (!msr_info->host_initiated &&
-                     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-                       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+                if (!msr_info->host_initiated &&
+                    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
                         return 1;
                 msr_info->data = vcpu->arch.ia32_xss;
                 break;
@@ -2064,10 +2062,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                         return 1;
                 return vmx_set_vmx_msr(vcpu, msr_index, data);
         case MSR_IA32_XSS:
-                if (!vmx_xsaves_supported() ||
-                    (!msr_info->host_initiated &&
-                     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-                       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+                if (!msr_info->host_initiated &&
+                    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
                         return 1;
                 /*
                  * The only supported bit as of Skylake is bit 8, but
@@ -2076,11 +2072,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 if (data != 0)
                         return 1;
                 vcpu->arch.ia32_xss = data;
-                if (vcpu->arch.ia32_xss != host_xss)
-                        add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-                                vcpu->arch.ia32_xss, host_xss, false);
-                else
-                        clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+                if (vcpu->arch.xsaves_enabled) {
+                        if (vcpu->arch.ia32_xss != host_xss)
+                                add_atomic_switch_msr(vmx, MSR_IA32_XSS,
+                                        vcpu->arch.ia32_xss, host_xss, false);
+                        else
+                                clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+                }
                 break;
         case MSR_IA32_RTIT_CTL:
                 if ((pt_mode != PT_MODE_HOST_GUEST) ||
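For context on the last hunk: add_atomic_switch_msr()/clear_atomic_switch_msr() maintain the VMX MSR autoload lists that the CPU loads and restores on VM-entry/VM-exit, and the new vcpu->arch.xsaves_enabled guard skips that machinery entirely when the guest cannot use XSAVES. A simplified model of the add/clear pattern; the structure and function names below are illustrative assumptions, not KVM's actual implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MSR_IA32_XSS    0x00000da0      /* architectural MSR number */
#define MAX_SWITCH_MSRS 8               /* hypothetical list capacity */

struct msr_autoload_entry {
        uint32_t index;                 /* MSR number */
        uint64_t guest_val;             /* loaded on VM-entry */
        uint64_t host_val;              /* restored on VM-exit */
        bool in_use;
};

static struct msr_autoload_entry autoload[MAX_SWITCH_MSRS];

/* Add or update an entry so the CPU swaps this MSR on entry/exit. */
static bool add_switch_msr(uint32_t index, uint64_t guest, uint64_t host)
{
        struct msr_autoload_entry *free_slot = NULL;

        for (size_t i = 0; i < MAX_SWITCH_MSRS; i++) {
                if (autoload[i].in_use && autoload[i].index == index) {
                        autoload[i].guest_val = guest;
                        autoload[i].host_val = host;
                        return true;
                }
                if (!autoload[i].in_use && !free_slot)
                        free_slot = &autoload[i];
        }
        if (!free_slot)
                return false;   /* list full; real code falls back to WRMSR */
        *free_slot = (struct msr_autoload_entry){ index, guest, host, true };
        return true;
}

/* Drop the entry when guest and host values match: no switch needed. */
static void clear_switch_msr(uint32_t index)
{
        for (size_t i = 0; i < MAX_SWITCH_MSRS; i++)
                if (autoload[i].in_use && autoload[i].index == index)
                        autoload[i].in_use = false;
}

int main(void)
{
        uint64_t guest_xss = 1u << 8;   /* e.g. Trace Packet Config state */
        uint64_t host_xss = 0;

        /* Mirrors the patched logic: atomic switch only when values differ. */
        if (guest_xss != host_xss)
                add_switch_msr(MSR_IA32_XSS, guest_xss, host_xss);
        else
                clear_switch_msr(MSR_IA32_XSS);
        return 0;
}

The design point the hunk captures: autoload slots are a scarce per-vCPU resource, so there is no reason to consume one for IA32_XSS unless the guest both has XSAVES and programs a value different from the host's.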