[RFC,v7,12/64] KVM: SEV: Implement .fault_is_private callback

Message ID: 20221214194056.161492-13-michael.roth@amd.com (mailing list archive)
State: Not Applicable
Delegated to: Herbert Xu
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth Dec. 14, 2022, 7:40 p.m. UTC
The KVM MMU will use this callback to determine whether an #NPF should be
serviced with restricted memory or not.

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/kvm/svm/sev.c | 23 +++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c |  2 ++
 arch/x86/kvm/svm/svm.h |  2 ++
 3 files changed, 27 insertions(+)
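
As a caller-side illustration only (not part of the patch): the generic MMU
fault path could consume the new hook roughly as sketched below. The helper
name kvm_mmu_fault_is_private() and the direct kvm_x86_ops member call are
assumptions made here for readability; the actual wiring of the hook into the
#NPF handling is done by earlier patches in this series, and in-tree KVM would
normally go through the static_call() machinery rather than calling the ops
struct directly.

static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
{
	bool private_fault = false;

	/*
	 * Vendor code returns 1 if it was able to classify the fault,
	 * 0 if the generic fallback should decide.
	 */
	if (kvm_x86_ops.fault_is_private &&
	    kvm_x86_ops.fault_is_private(kvm, gpa, err, &private_fault))
		return private_fault;

	/* Fallback: derive privateness from the gfn's backing source. */
	return kvm_mem_is_private(kvm, gpa_to_gfn(gpa));
}

With sev_fault_is_private() plugged in, an SEV guest's #NPF is classified
purely from kvm_mem_is_private(), i.e. from the backing source that userspace
last configured via explicit conversions.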

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index ae4920aeb281..6579ed218f6a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3179,3 +3179,26 @@  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 
 	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
 }
+
+int sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault)
+{
+	gfn_t gfn = gpa_to_gfn(gpa);
+
+	if (!kvm_is_upm_enabled(kvm) || !sev_guest(kvm))
+		goto out_unhandled;
+
+	/*
+	 * For SEV, the hypervisor is not aware of implicit conversions in the
+	 * guest; it relies purely on explicit conversions via
+	 * KVM_EXIT_HYPERCALL, and the resulting userspace handling is expected
+	 * to update the backing memory source accordingly. The backing source
+	 * is therefore the only indicator of whether the fault should be
+	 * treated as private or not.
+	 */
+	*private_fault = kvm_mem_is_private(kvm, gfn);
+
+	return 1;
+
+out_unhandled:
+	return 0;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7f3e4d91c0c6..fc7885869f7e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4830,6 +4830,8 @@  static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
+
+	.fault_is_private = sev_fault_is_private,
 };
 
 /*
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 4826e6cc611b..c760ec51a910 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -683,6 +683,8 @@  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
 void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
+int sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault);
+
 /* vmenter.S */
 
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);