cet_is_msr_accessible() can also be used for shadow stack support in
SVM. Move it to common x86 KVM code.

Signed-off-by: John Allen <john.allen@amd.com>
---
 arch/x86/kvm/vmx/vmx.c | 32 +++-----------------------------
 arch/x86/kvm/x86.c     | 26 ++++++++++++++++++++++++++
 arch/x86/kvm/x86.h     |  2 ++
 3 files changed, 31 insertions(+), 29 deletions(-)
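Not part of the commit itself: the helper packs a small decision table,
so below is a stand-alone userspace restatement of it that compiles and
runs as-is. Every type and identifier in it is a mock invented for
illustration; none of it is KVM API.

/*
 * Mirrors the control flow of kvm_cet_is_msr_accessible() as added to
 * x86.c below.  All names here are mocks, not kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

enum mock_msr { S_CET, PL3_SSP, GUEST_SSP, U_CET };	/* arbitrary ids */

struct mock_vcpu {
	bool shstk;	/* stands in for guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) */
	bool ibt;	/* stands in for guest_cpuid_has(vcpu, X86_FEATURE_IBT) */
};

static bool mock_cet_msr_accessible(bool host_cet, bool host_initiated,
				    struct mock_vcpu *vcpu, enum mock_msr msr)
{
	/* host supports neither user CET nor kernel IBT: nothing is visible */
	if (!host_cet)
		return false;

	/* host-initiated (ioctl) accesses are always allowed */
	if (host_initiated)
		return true;

	/* guest CPUID advertises neither feature: refuse everything */
	if (!vcpu->shstk && !vcpu->ibt)
		return false;

	/* S_CET also carries IBT controls, so guest IBT alone suffices */
	if (msr == S_CET && vcpu->ibt)
		return true;

	/* the SSP MSRs are pure shadow-stack state */
	if ((msr == PL3_SSP || msr == GUEST_SSP) && !vcpu->shstk)
		return false;

	return true;
}

int main(void)
{
	struct mock_vcpu ibt_only = { .shstk = false, .ibt = true };

	/* an IBT-only guest: S_CET allowed (1), PL3_SSP refused (0) */
	printf("S_CET:   %d\n",
	       mock_cet_msr_accessible(true, false, &ibt_only, S_CET));
	printf("PL3_SSP: %d\n",
	       mock_cet_msr_accessible(true, false, &ibt_only, PL3_SSP));
	return 0;
}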
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1845,32 +1845,6 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
 	}
 }
 
-static bool cet_is_msr_accessible(struct kvm_vcpu *vcpu,
-				  struct msr_data *msr)
-{
-	if (!kvm_cet_user_supported() &&
-	    !cet_kernel_ibt_supported())
-		return false;
-
-	if (msr->host_initiated)
-		return true;
-
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) &&
-	    !guest_cpuid_has(vcpu, X86_FEATURE_IBT))
-		return false;
-
-	if (msr->index == MSR_IA32_S_CET &&
-	    guest_cpuid_has(vcpu, X86_FEATURE_IBT))
-		return true;
-
-	if ((msr->index == MSR_IA32_PL3_SSP ||
-	     msr->index == MSR_KVM_GUEST_SSP) &&
-	    !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
-		return false;
-
-	return true;
-}
-
 /*
  * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
  * Returns 0 on success, non-0 otherwise.
@@ -2014,7 +1988,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_PL3_SSP:
 	case MSR_KVM_GUEST_SSP:
 	case MSR_IA32_S_CET:
-		if (!cet_is_msr_accessible(vcpu, msr_info))
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
 			return 1;
 		if (msr_info->index == MSR_KVM_GUEST_SSP)
 			msr_info->data = vmcs_readl(GUEST_SSP);
@@ -2363,7 +2337,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_U_CET:
 	case MSR_IA32_S_CET:
-		if (!cet_is_msr_accessible(vcpu, msr_info))
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
 			return 1;
 		if ((data & GENMASK(9, 6)) ||
 		    is_noncanonical_address(data, vcpu))
@@ -2375,7 +2349,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_PL3_SSP:
 	case MSR_KVM_GUEST_SSP:
-		if (!cet_is_msr_accessible(vcpu, msr_info))
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
 			return 1;
 		if ((data & GENMASK(2, 0)) ||
 		    is_noncanonical_address(data, vcpu))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13475,6 +13475,32 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
 
+bool kvm_cet_is_msr_accessible(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+	if (!kvm_cet_user_supported() &&
+	    !cet_kernel_ibt_supported())
+		return false;
+
+	if (msr->host_initiated)
+		return true;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) &&
+	    !guest_cpuid_has(vcpu, X86_FEATURE_IBT))
+		return false;
+
+	if (msr->index == MSR_IA32_S_CET &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_IBT))
+		return true;
+
+	if ((msr->index == MSR_IA32_PL3_SSP ||
+	     msr->index == MSR_KVM_GUEST_SSP) &&
+	    !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(kvm_cet_is_msr_accessible);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -502,6 +502,8 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 			 unsigned int port, void *data, unsigned int count,
 			 int in);
 
+bool kvm_cet_is_msr_accessible(struct kvm_vcpu *vcpu, struct msr_data *msr);
+
 /*
  * We've already loaded guest MSRs in __msr_io() when check the MSR index.
  * In case vcpu has been preempted, we need to disable preemption, check