[v11,09/35] KVM: x86: Determine shared/private faults based on vm_type

Message ID 20231230172351.574091-10-michael.roth@amd.com (mailing list archive)
State New, archived
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth Dec. 30, 2023, 5:23 p.m. UTC
For KVM_X86_SNP_VM, only the PFERR_GUEST_ENC_MASK flag is needed to
determine whether an #NPF is due to a private/shared access by the
guest. Implement that handling here. Also add the handling needed to
deal with SNP guests, which in some cases will make MMIO accesses with
the encryption bit set.

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/kvm/mmu/mmu.c          | 12 ++++++++++--
 arch/x86/kvm/mmu/mmu_internal.h | 20 +++++++++++++++++++-
 2 files changed, 29 insertions(+), 3 deletions(-)

Comments

Paolo Bonzini Feb. 12, 2024, 10:31 a.m. UTC | #1
On Sat, Dec 30, 2023 at 6:24 PM Michael Roth <michael.roth@amd.com> wrote:
>
> For KVM_X86_SNP_VM, only the PFERR_GUEST_ENC_MASK flag is needed to
> determine whether an #NPF is due to a private/shared access by the
> guest. Implement that handling here. Also add the handling needed to
> deal with SNP guests, which in some cases will make MMIO accesses with
> the encryption bit set.
>
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> ---
>  arch/x86/kvm/mmu/mmu.c          | 12 ++++++++++--
>  arch/x86/kvm/mmu/mmu_internal.h | 20 +++++++++++++++++++-
>  2 files changed, 29 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index d3fbfe0686a0..61213f6648a1 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4331,6 +4331,7 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
>  static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  {
>         struct kvm_memory_slot *slot = fault->slot;
> +       bool private_fault = fault->is_private;

I think it's nicer to just make the fault !is_private in
kvm_mmu_do_page_fault().

> +static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
> +{
> +       bool private_fault = false;
> +
> +       if (kvm_is_vm_type(kvm, KVM_X86_SNP_VM)) {
> +               private_fault = !!(err & PFERR_GUEST_ENC_MASK);
> +       } else if (kvm_is_vm_type(kvm, KVM_X86_SW_PROTECTED_VM)) {
> +               /*
> +                * This handling is for gmem self-tests and guests that treat
> +                * userspace as the authority on whether a fault should be
> +                * private or not.
> +                */
> +               private_fault = kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT);
> +       }

Any reason to remove the is_private page fault that was there in
previous versions of the patch?  I don't really like having both TDX
and SVM-specific code in this function.

Paolo

> +       return private_fault;
> +}
> +
>  /*
>   * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
>   * and of course kvm_mmu_do_page_fault().
> @@ -298,7 +316,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>                 .max_level = KVM_MAX_HUGEPAGE_LEVEL,
>                 .req_level = PG_LEVEL_4K,
>                 .goal_level = PG_LEVEL_4K,
> -               .is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
> +               .is_private = kvm_mmu_fault_is_private(vcpu->kvm, cr2_or_gpa, err),
>         };
>         int r;
>
> --
> 2.25.1
>
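
For illustration, a minimal sketch of Paolo's suggestion above, i.e.
resolving is_private once in kvm_mmu_do_page_fault() so that
__kvm_faultin_pfn() needs no separate private_fault local. The memslot
lookup used here to detect the SNP MMIO case that early is an assumption,
not code from the series:

static inline bool kvm_mmu_fault_is_private(struct kvm_vcpu *vcpu, gpa_t gpa,
					    u64 err)
{
	struct kvm *kvm = vcpu->kvm;

	if (kvm_is_vm_type(kvm, KVM_X86_SNP_VM)) {
		/*
		 * SNP guests can make MMIO accesses with the encryption bit
		 * set; with no backing memslot, treat the fault as shared so
		 * it takes the normal MMIO path.
		 */
		if (!kvm_vcpu_gfn_to_memslot(vcpu, gpa >> PAGE_SHIFT))
			return false;

		return !!(err & PFERR_GUEST_ENC_MASK);
	}

	if (kvm_is_vm_type(kvm, KVM_X86_SW_PROTECTED_VM))
		return kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT);

	return false;
}
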
Sean Christopherson Feb. 12, 2024, 4:27 p.m. UTC | #2
On Mon, Feb 12, 2024, Paolo Bonzini wrote:
> On Sat, Dec 30, 2023 at 6:24 PM Michael Roth <michael.roth@amd.com> wrote:
> >
> > For KVM_X86_SNP_VM, only the PFERR_GUEST_ENC_MASK flag is needed to
> > determine whether an #NPF is due to a private/shared access by the
> > guest. Implement that handling here. Also add the handling needed to
> > deal with SNP guests, which in some cases will make MMIO accesses with
> > the encryption bit set.
> >
> > Signed-off-by: Michael Roth <michael.roth@amd.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c          | 12 ++++++++++--
> >  arch/x86/kvm/mmu/mmu_internal.h | 20 +++++++++++++++++++-
> >  2 files changed, 29 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index d3fbfe0686a0..61213f6648a1 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -4331,6 +4331,7 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> >  static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
> >  {
> >         struct kvm_memory_slot *slot = fault->slot;
> > +       bool private_fault = fault->is_private;
> 
> I think it's nicer to just make the fault !is_private in
> kvm_mmu_do_page_fault().

Yeah.  I'm starting to recall more of this discussion.  This is one of the reasons
I suggested/requested stuffing the error code to piggy-back the new SNP bit; doing
so allows is_private to be computed from the get-go without needing any vendor
specific hooks.
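
A rough sketch of what "stuffing the error code" could look like, with
PFERR_PRIVATE_ACCESS as a placeholder name and bit position for the
KVM-synthetic flag (assumptions, not code from this series): the vendor
#NPF handler ORs the flag in, and common MMU code then derives is_private
from the error code alone.

/* Software-defined bit, never reported by hardware in the #NPF error code. */
#define PFERR_PRIVATE_ACCESS	BIT_ULL(49)

/* SVM: adjust the raw #NPF error code before calling into the common MMU. */
static u64 snp_adjust_npf_error_code(struct kvm_vcpu *vcpu, u64 error_code)
{
	if (kvm_is_vm_type(vcpu->kvm, KVM_X86_SNP_VM) &&
	    (error_code & PFERR_GUEST_ENC_MASK))
		error_code |= PFERR_PRIVATE_ACCESS;

	return error_code;
}

Common code could then initialize the fault with
".is_private = !!(err & PFERR_PRIVATE_ACCESS)" and drop the vm_type checks.
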
Michael Roth Feb. 12, 2024, 4:47 p.m. UTC | #3
On Mon, Feb 12, 2024 at 08:27:21AM -0800, Sean Christopherson wrote:
> On Mon, Feb 12, 2024, Paolo Bonzini wrote:
> > On Sat, Dec 30, 2023 at 6:24 PM Michael Roth <michael.roth@amd.com> wrote:
> > >
> > > For KVM_X86_SNP_VM, only the PFERR_GUEST_ENC_MASK flag is needed to
> > > determine whether an #NPF is due to a private/shared access by the
> > > guest. Implement that handling here. Also add the handling needed to
> > > deal with SNP guests, which in some cases will make MMIO accesses with
> > > the encryption bit set.
> > >
> > > Signed-off-by: Michael Roth <michael.roth@amd.com>
> > > ---
> > >  arch/x86/kvm/mmu/mmu.c          | 12 ++++++++++--
> > >  arch/x86/kvm/mmu/mmu_internal.h | 20 +++++++++++++++++++-
> > >  2 files changed, 29 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > > index d3fbfe0686a0..61213f6648a1 100644
> > > --- a/arch/x86/kvm/mmu/mmu.c
> > > +++ b/arch/x86/kvm/mmu/mmu.c
> > > @@ -4331,6 +4331,7 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> > >  static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
> > >  {
> > >         struct kvm_memory_slot *slot = fault->slot;
> > > +       bool private_fault = fault->is_private;
> > 
> > I think it's nicer to just make the fault !is_private in
> > kvm_mmu_do_page_fault().
> 
> Yeah.  I'm starting to recall more of this discussion.  This is one of the reasons
> I suggested/requested stuffing the error code to piggy-back the new SNP bit; doing
> so allows is_private to be computed from the get-go without needing any vendor
> specific hooks.

Makes sense to me. Based on your suggestion here:

  https://lore.kernel.org/kvm/ZcUO5sFEAIH68JIA@google.com/

I was planning to drop this patch and adopt the TDX implementation:

  https://github.com/intel/tdx/commit/3717a903ef453aa7b62e7eb65f230566b7f158d4

-Mike
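
Assuming the error code ends up carrying a synthetic private flag as
discussed above, the vm_type-specific helper from this patch could collapse
into something vendor-neutral along these lines (a guess at the general
shape, not a transcription of the linked TDX commit; PFERR_PRIVATE_ACCESS is
the same placeholder as above):

static inline bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
{
	/*
	 * Software-protected VMs have no hardware indication, so defer to
	 * the userspace-managed memory attributes.
	 */
	if (kvm_is_vm_type(kvm, KVM_X86_SW_PROTECTED_VM))
		return kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT);

	return !!(err & PFERR_PRIVATE_ACCESS);
}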

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d3fbfe0686a0..61213f6648a1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4331,6 +4331,7 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
 static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
+	bool private_fault = fault->is_private;
 	bool async;
 
 	/*
@@ -4360,12 +4361,19 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 			return RET_PF_EMULATE;
 	}
 
-	if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
+	/*
+	 * In some cases SNP guests will make MMIO accesses with the encryption
+	 * bit set. Handle these via the normal MMIO fault path.
+	 */
+	if (!slot && private_fault && kvm_is_vm_type(vcpu->kvm, KVM_X86_SNP_VM))
+		private_fault = false;
+
+	if (private_fault != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
 		return -EFAULT;
 	}
 
-	if (fault->is_private)
+	if (private_fault)
 		return kvm_faultin_pfn_private(vcpu, fault);
 
 	async = false;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 21f55e8b4dc6..e519dd363c28 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -251,6 +251,24 @@ struct kvm_page_fault {
 
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 
+static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
+{
+	bool private_fault = false;
+
+	if (kvm_is_vm_type(kvm, KVM_X86_SNP_VM)) {
+		private_fault = !!(err & PFERR_GUEST_ENC_MASK);
+	} else if (kvm_is_vm_type(kvm, KVM_X86_SW_PROTECTED_VM)) {
+		/*
+		 * This handling is for gmem self-tests and guests that treat
+		 * userspace as the authority on whether a fault should be
+		 * private or not.
+		 */
+		private_fault = kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT);
+	}
+
+	return private_fault;
+}
+
 /*
  * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
  * and of course kvm_mmu_do_page_fault().
@@ -298,7 +316,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
 		.req_level = PG_LEVEL_4K,
 		.goal_level = PG_LEVEL_4K,
-		.is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
+		.is_private = kvm_mmu_fault_is_private(vcpu->kvm, cr2_or_gpa, err),
 	};
 	int r;