@@ -134,6 +134,13 @@ static const struct svm_direct_access_msrs {
{ .index = X2APIC_MSR(APIC_TMICT), .always = false },
{ .index = X2APIC_MSR(APIC_TMCCT), .always = false },
{ .index = X2APIC_MSR(APIC_TDCR), .always = false },
+ { .index = MSR_IA32_U_CET, .always = false },
+ { .index = MSR_IA32_S_CET, .always = false },
+ { .index = MSR_IA32_INT_SSP_TAB, .always = false },
+ { .index = MSR_IA32_PL0_SSP, .always = false },
+ { .index = MSR_IA32_PL1_SSP, .always = false },
+ { .index = MSR_IA32_PL2_SSP, .always = false },
+ { .index = MSR_IA32_PL3_SSP, .always = false },
{ .index = MSR_INVALID, .always = false },
};
@@ -1174,6 +1181,16 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
}
+
+ if (kvm_cet_user_supported() && guest_cpuid_has(vcpu, X86_FEATURE_SHSTK)) {
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_U_CET, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_S_CET, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_INT_SSP_TAB, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL0_SSP, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL1_SSP, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL2_SSP, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL3_SSP, 1, 1);
+ }
}
static void init_vmcb(struct kvm_vcpu *vcpu)
@@ -29,7 +29,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
-#define MAX_DIRECT_ACCESS_MSRS 46
+#define MAX_DIRECT_ACCESS_MSRS 53
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
If KVM supports shadow stack, pass through the shadow stack MSRs to the guest to improve guest performance. Signed-off-by: John Allen <john.allen@amd.com> --- arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++ arch/x86/kvm/svm/svm.h | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-)