[RFC,v2,1/6] KVM: x86: SVM: Emulate reads and writes to shadow stack MSRs

Message ID 20230524155339.415820-2-john.allen@amd.com (mailing list archive)

Commit Message

John Allen May 24, 2023, 3:53 p.m. UTC
Set up interception of the shadow stack MSRs. If shadow stack is
unsupported on the host or the MSRs are otherwise inaccessible, the
interception code returns an error. In certain circumstances, such as
host-initiated MSR reads or writes, the interception code gets or sets
the requested MSR value.
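
For reference, the host-initiated case above is what a VMM exercises
through the KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls, e.g. when saving and
restoring guest state for migration. Below is a minimal userspace sketch
of such a host-initiated write, not part of this patch: vcpu_fd and
restore_pl3_ssp() are illustrative names, the MSR index is the
architectural one from the SDM, and it assumes a kernel carrying this
series with shadow stack enabled for the guest.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Architectural MSR index for IA32_PL3_SSP (user-mode shadow stack pointer). */
#define MSR_IA32_PL3_SSP	0x6a7

/* Host-initiated write of a guest's user shadow stack pointer. */
static int restore_pl3_ssp(int vcpu_fd, uint64_t ssp)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entry.index = MSR_IA32_PL3_SSP;
	msrs.entry.data = ssp;	/* must be 4-byte aligned and canonical */

	/* KVM_SET_MSRS returns the number of MSRs successfully written. */
	if (ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) != 1) {
		perror("KVM_SET_MSRS");
		return -1;
	}

	return 0;
}

The alignment and canonicality checks added to svm_set_msr() below are
what would reject a malformed value written through this path.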

Signed-off-by: John Allen <john.allen@amd.com>
---
 arch/x86/kvm/svm/svm.c | 58 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index eb308c9994f9..822d7a65e92a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2800,6 +2800,31 @@  static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (guest_cpuid_is_intel(vcpu))
 			msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
 		break;
+	case MSR_IA32_S_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = svm->vmcb->save.s_cet;
+		break;
+	case MSR_IA32_U_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		kvm_get_xsave_msr(msr_info);
+		break;
+	case MSR_IA32_INT_SSP_TAB:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = svm->vmcb->save.isst_addr;
+		break;
+	case MSR_KVM_GUEST_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		msr_info->data = svm->vmcb->save.ssp;
+		break;
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
+			return 1;
+		kvm_get_xsave_msr(msr_info);
+		break;
 	case MSR_TSC_AUX:
 		msr_info->data = svm->tsc_aux;
 		break;
@@ -3016,6 +3041,39 @@  static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
 		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
 		break;
+	case MSR_IA32_S_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+		svm->vmcb->save.s_cet = data;
+		break;
+	case MSR_IA32_U_CET:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+		kvm_set_xsave_msr(msr);
+		break;
+	case MSR_IA32_INT_SSP_TAB:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+		if (is_noncanonical_address(data, vcpu))
+			return 1;
+		svm->vmcb->save.isst_addr = data;
+		break;
+	case MSR_KVM_GUEST_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+	/* SSP MSR values must be 4-byte aligned canonical addresses */
+		if ((data & GENMASK(1, 0)) || is_noncanonical_address(data, vcpu))
+			return 1;
+		svm->vmcb->save.ssp = data;
+		break;
+	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+		if (!kvm_cet_is_msr_accessible(vcpu, msr))
+			return 1;
+	/* SSP MSR values must be 4-byte aligned canonical addresses */
+		if ((data & GENMASK(1, 0)) || is_noncanonical_address(data, vcpu))
+			return 1;
+		kvm_set_xsave_msr(msr);
+		break;
 	case MSR_TSC_AUX:
 		/*
 		 * TSC_AUX is usually changed only during boot and never read