@@ -770,6 +770,7 @@ void fpregs_lock_and_load(void)
if (test_thread_flag(TIF_NEED_FPU_LOAD))
fpregs_restore_userregs();
}
+EXPORT_SYMBOL_GPL(fpregs_lock_and_load);
#ifdef CONFIG_X86_DEBUG_FPU
/*
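Exporting fpregs_lock_and_load() is what lets the modular KVM code further
down call it from the new xsave MSR helpers. A minimal sketch of the intended
calling pattern (paraphrasing the function's contract, not taken from this
patch):

	fpregs_lock_and_load();	/* preemption off, current task's xstate loaded */
	/* rdmsrl()/wrmsrl() of XSAVES-managed MSRs is safe here */
	fpregs_unlock();	/* re-enable preemption */
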
@@ -1955,6 +1955,26 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
}
}
+static bool cet_is_msr_accessible(struct kvm_vcpu *vcpu,
+ struct msr_data *msr)
+{
+ if (!kvm_cet_user_supported())
+ return false;
+
+ if (msr->host_initiated)
+ return true;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_IBT))
+ return false;
+
+ if (msr->index == MSR_IA32_PL3_SSP &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK))
+ return false;
+
+ return true;
+}
+
/*
* Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
* Returns 0 on success, non-0 otherwise.
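To make the gating in cet_is_msr_accessible() explicit, here is the access
matrix it implements (an illustrative summary, not part of the patch;
"host-initiated" means the access comes from a userspace ioctl rather than
from the guest):

	/*
	 * host CET_USER in XSS | host_initiated | guest CPUID      | result
	 * ---------------------+----------------+------------------+--------
	 * absent               | any            | any              | deny
	 * present              | yes            | any              | allow
	 * present              | no             | no SHSTK, no IBT | deny
	 * present              | no             | IBT only         | allow,
	 *                      |                |                  | except
	 *                      |                |                  | PL3_SSP
	 * present              | no             | SHSTK            | allow
	 */
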
@@ -2093,6 +2113,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
break;
+ case MSR_IA32_U_CET:
+ case MSR_IA32_PL3_SSP:
+ if (!cet_is_msr_accessible(vcpu, msr_info))
+ return 1;
+ kvm_get_xsave_msr(msr_info);
+ break;
case MSR_IA32_DEBUGCTLMSR:
msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
break;
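From userspace, the new read path is reachable through KVM_GET_MSRS. A short
sketch of exercising it (assumes an already-created vcpu_fd; the index 0x6a0
for IA32_U_CET is from the SDM; error handling trimmed):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdint.h>

	#define MSR_IA32_U_CET	0x6a0

	static uint64_t read_u_cet(int vcpu_fd)
	{
		struct {
			struct kvm_msrs hdr;
			struct kvm_msr_entry entry;
		} req = {
			.hdr.nmsrs   = 1,
			.entry.index = MSR_IA32_U_CET,
		};

		/* Host-initiated access: cet_is_msr_accessible() permits it
		 * whenever kvm_cet_user_supported() is true, regardless of
		 * the guest's CPUID. */
		ioctl(vcpu_fd, KVM_GET_MSRS, &req);
		return req.entry.data;
	}
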
@@ -2405,6 +2431,22 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
vmx->pt_desc.guest.addr_a[index / 2] = data;
break;
+ case MSR_IA32_U_CET:
+ if (!cet_is_msr_accessible(vcpu, msr_info))
+ return 1;
+ if ((data & GENMASK(9, 6)) ||
+ is_noncanonical_address(data, vcpu))
+ return 1;
+ kvm_set_xsave_msr(msr_info);
+ break;
+ case MSR_IA32_PL3_SSP:
+ if (!cet_is_msr_accessible(vcpu, msr_info))
+ return 1;
+ if ((data & GENMASK(2, 0)) ||
+ is_noncanonical_address(data, vcpu))
+ return 1;
+ kvm_set_xsave_msr(msr_info);
+ break;
case MSR_IA32_PERF_CAPABILITIES:
if (data && !vcpu_to_pmu(vcpu)->version)
return 1;
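The write path mirrors this via KVM_SET_MSRS, and the new validity checks
surface to userspace through the ioctl's return value: KVM_SET_MSRS reports
the number of entries processed, so a value that trips the reserved-bit or
canonical checks stops at that entry. A sketch (same hypothetical vcpu_fd as
above):

	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_IA32_U_CET,
		.entry.data  = 0x40,	/* bit 6 falls in the reserved GENMASK(9, 6) */
	};

	/* Expect 0 entries processed: vmx_set_msr() returns 1 for the
	 * reserved-bit violation above. */
	int done = ioctl(vcpu_fd, KVM_SET_MSRS, &req);
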
@@ -2,6 +2,7 @@
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H
+#include <asm/fpu/api.h>
#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
@@ -370,6 +371,16 @@ static inline bool kvm_mpx_supported(void)
== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}
+/*
+ * Guest CET user-mode state depends on the host's XSAVES/XRSTORS to be
+ * saved/restored when the vCPU enters/exits userspace. If the host doesn't
+ * support the CET user bit in the IA32_XSS MSR, treat it as if KVM doesn't
+ * support CET user mode either.
+ */
+static inline bool kvm_cet_user_supported(void)
+{
+ return !!(kvm_caps.supported_xss & XFEATURE_MASK_CET_USER);
+}
+
extern unsigned int min_timer_period_us;
extern bool enable_vmware_backdoor;
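For context, kvm_caps.supported_xss is expected to be derived from the host's
IA32_XSS elsewhere in the series, roughly as sketched below (with
KVM_SUPPORTED_XSS standing in for the series' mask of XSS features KVM
virtualizes; XFEATURE_MASK_CET_USER is XSS bit 11):

	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
		rdmsrl(MSR_IA32_XSS, host_xss);
		kvm_caps.supported_xss = host_xss & KVM_SUPPORTED_XSS;
	}
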
@@ -550,4 +561,23 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port, void *data, unsigned int count,
int in);
+/*
+ * We've already loaded the guest's MSR state in __msr_io() after checking
+ * the MSR index. In case the vCPU has been preempted since then, we need to
+ * disable preemption and check/reload the guest FPU state before reading or
+ * writing XSAVES-managed MSRs.
+ */
+static inline void kvm_get_xsave_msr(struct msr_data *msr_info)
+{
+ fpregs_lock_and_load();
+ rdmsrl(msr_info->index, msr_info->data);
+ fpregs_unlock();
+}
+
+static inline void kvm_set_xsave_msr(struct msr_data *msr_info)
+{
+ fpregs_lock_and_load();
+ wrmsrl(msr_info->index, msr_info->data);
+ fpregs_unlock();
+}
+
#endif
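
Why the lock-and-load dance matters: a plain rdmsrl()/wrmsrl() here could
race with preemption, because the CET MSRs are XSAVES-managed and a context
switch swaps them together with the task's xstate. An illustrative bad
interleaving (not from the patch):

	/*
	 * vcpu task:    about to rdmsrl(MSR_IA32_PL3_SSP, ...)
	 * -> preempted: guest xstate saved, next task's xstate loaded
	 * vcpu task:    resumes; rdmsrl() now reads some other task's
	 *               PL3_SSP, not the guest's
	 *
	 * fpregs_lock_and_load() closes the window: it disables
	 * preemption and restores the current task's (i.e. the guest's)
	 * xstate into the registers before the MSR access.
	 */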