@@ -583,6 +583,10 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
 		return true;
+	case MSR_IA32_U_CET:
+	case MSR_IA32_PL3_SSP:
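+		/* CET MSRs. These are handled in vmx_update_intercept_for_cet_msr() */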
+		return true;
 	}
 
 	r = possible_passthrough_msr_slot(msr) != -ENOENT;
@@ -7595,6 +7599,30 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
 }
 
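+/*
+ * CET user-mode state (MSR_IA32_U_CET and MSR_IA32_PL3_SSP) is managed by
+ * XSAVES, so exposing it requires both host XSS support for the state and
+ * guest CPUID exposure of SHSTK and/or IBT.
+ */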
+static bool is_cet_state_supported(struct kvm_vcpu *vcpu, u64 xss_state)
+{
+	return (kvm_caps.supported_xss & xss_state) &&
+	       (guest_cpuid_has(vcpu, X86_FEATURE_SHSTK) ||
+		guest_cpuid_has(vcpu, X86_FEATURE_IBT));
+}
+
+static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu)
+{
+	bool incpt = !is_cet_state_supported(vcpu, XFEATURE_MASK_CET_USER);
+
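+	/* U_CET carries both SHSTK and IBT controls, either feature enables it. */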
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, incpt);
+
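+	/* PL3_SSP is shadow-stack state, so it additionally requires SHSTK. */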
+	incpt |= !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, incpt);
+}
+
 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7657,6 +7685,10 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
 	vmx_update_exception_bitmap(vcpu);
+
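+	/* CET MSR pass-through depends on guest CPUID, so recompute it here. */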
+	if (kvm_cet_user_supported())
+		vmx_update_intercept_for_cet_msr(vcpu);
 }
 
 static __init void vmx_set_cpu_caps(void)