Message ID | 20221214194056.161492-5-michael.roth@amd.com (mailing list archive) |
---|---|
State | Not Applicable |
Delegated to: | Herbert Xu |
Headers | show |
Series | Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support | expand |
On Wed, Dec 14, 2022 at 01:39:56PM -0600, Michael Roth wrote: > This callback is used by the KVM MMU to check whether a #NPF was > or a private GPA or not. s/or // > > Signed-off-by: Michael Roth <michael.roth@amd.com> > --- > arch/x86/include/asm/kvm-x86-ops.h | 1 + > arch/x86/include/asm/kvm_host.h | 1 + > arch/x86/kvm/mmu/mmu.c | 3 +-- > arch/x86/kvm/mmu/mmu_internal.h | 40 +++++++++++++++++++++++++++--- > 4 files changed, 39 insertions(+), 6 deletions(-) > > diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h > index f530a550c092..efae987cdce0 100644 > --- a/arch/x86/include/asm/kvm-x86-ops.h > +++ b/arch/x86/include/asm/kvm-x86-ops.h > @@ -132,6 +132,7 @@ KVM_X86_OP(complete_emulated_msr) > KVM_X86_OP(vcpu_deliver_sipi_vector) > KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); > KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled); > +KVM_X86_OP_OPTIONAL_RET0(fault_is_private); > > #undef KVM_X86_OP > #undef KVM_X86_OP_OPTIONAL > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > index 9317abffbf68..92539708f062 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -1636,6 +1636,7 @@ struct kvm_x86_ops { > void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa, > int root_level); > int (*private_mem_enabled)(struct kvm *kvm); > + int (*fault_is_private)(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault); bool and then you don't need the silly "== 1" at the call site. > > bool (*has_wbinvd_exit)(void); ... > @@ -261,13 +293,13 @@ enum { > }; > > static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, > - u32 err, bool prefetch) > + u64 err, bool prefetch) The u32 -> u64 change of err could use a sentence or two of clarification in the commit message... 
> { > bool is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault); > > struct kvm_page_fault fault = { > .addr = cr2_or_gpa, > - .error_code = err, > + .error_code = lower_32_bits(err), > .exec = err & PFERR_FETCH_MASK, > .write = err & PFERR_WRITE_MASK, > .present = err & PFERR_PRESENT_MASK, > @@ -281,8 +313,8 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, > .max_level = KVM_MAX_HUGEPAGE_LEVEL, > .req_level = PG_LEVEL_4K, > .goal_level = PG_LEVEL_4K, > - .is_private = IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING) && is_tdp && > - kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT), > + .is_private = is_tdp && kvm_mmu_fault_is_private(vcpu->kvm, > + cr2_or_gpa, err), > }; > int r; > > -- > 2.25.1 >
On Thu, Dec 29, 2022 at 05:14:03PM +0100, Borislav Petkov wrote: > On Wed, Dec 14, 2022 at 01:39:56PM -0600, Michael Roth wrote: > > This callback is used by the KVM MMU to check whether a #NPF was > > or a private GPA or not. > > s/or // > > > > > Signed-off-by: Michael Roth <michael.roth@amd.com> > > --- > > arch/x86/include/asm/kvm-x86-ops.h | 1 + > > arch/x86/include/asm/kvm_host.h | 1 + > > arch/x86/kvm/mmu/mmu.c | 3 +-- > > arch/x86/kvm/mmu/mmu_internal.h | 40 +++++++++++++++++++++++++++--- > > 4 files changed, 39 insertions(+), 6 deletions(-) > > > > diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h > > index f530a550c092..efae987cdce0 100644 > > --- a/arch/x86/include/asm/kvm-x86-ops.h > > +++ b/arch/x86/include/asm/kvm-x86-ops.h > > @@ -132,6 +132,7 @@ KVM_X86_OP(complete_emulated_msr) > > KVM_X86_OP(vcpu_deliver_sipi_vector) > > KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); > > KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled); > > +KVM_X86_OP_OPTIONAL_RET0(fault_is_private); > > > > #undef KVM_X86_OP > > #undef KVM_X86_OP_OPTIONAL > > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > > index 9317abffbf68..92539708f062 100644 > > --- a/arch/x86/include/asm/kvm_host.h > > +++ b/arch/x86/include/asm/kvm_host.h > > @@ -1636,6 +1636,7 @@ struct kvm_x86_ops { > > void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa, > > int root_level); > > int (*private_mem_enabled)(struct kvm *kvm); > > + int (*fault_is_private)(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault); > > bool > > and then you don't need the silly "== 1" at the call site. Obviously I need to add some proper documentation for this, but a 1 return basically means 'private_fault' pass-by-ref arg has been set with the appropriate value, whereas 0 means "there's no platform-specific handling for this, so if you have some generic way to determine this then use that instead". 
This is mainly to handle CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, which just parrots whatever kvm_mem_is_private() returns to support running KVM selftests without needed hardware/platform support. If we don't take care to skip this check where the above fault_is_private() hook returns 1, then it ends up breaking SNP in cases where the kernel has been compiled with CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, since SNP relies on the page fault flags to make this determination, not kvm_mem_is_private(), which normally only tracks the memory attributes set by userspace via KVM_SET_MEMORY_ATTRIBUTES ioctl. > > > > > bool (*has_wbinvd_exit)(void); > > ... > > > @@ -261,13 +293,13 @@ enum { > > }; > > > > static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, > > - u32 err, bool prefetch) > > + u64 err, bool prefetch) > > The u32 -> u64 change of err could use a sentence or two of > clarification in the commit message... Will do. -Mike > > > { > > bool is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault); > > > > struct kvm_page_fault fault = { > > .addr = cr2_or_gpa, > > - .error_code = err, > > + .error_code = lower_32_bits(err), > > .exec = err & PFERR_FETCH_MASK, > > .write = err & PFERR_WRITE_MASK, > > .present = err & PFERR_PRESENT_MASK, > > @@ -281,8 +313,8 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, > > .max_level = KVM_MAX_HUGEPAGE_LEVEL, > > .req_level = PG_LEVEL_4K, > > .goal_level = PG_LEVEL_4K, > > - .is_private = IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING) && is_tdp && > > - kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT), > > + .is_private = is_tdp && kvm_mmu_fault_is_private(vcpu->kvm, > > + cr2_or_gpa, err), > > }; > > int r; > > > > -- > > 2.25.1 > > > > -- > Regards/Gruss, > Boris. > > https://people.kernel.org/tglx/notes-about-netiquette
On Wed, Jan 04, 2023 at 08:42:56PM -0600, Michael Roth wrote: > Obviously I need to add some proper documentation for this, but a 1 > return basically means 'private_fault' pass-by-ref arg has been set > with the appropriate value, whereas 0 means "there's no platform-specific > handling for this, so if you have some generic way to determine this > then use that instead". Still binary, tho, and can be bool, right? I.e., you can just as well do: if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault)) goto out; at the call site. > This is mainly to handle CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, which > just parrots whatever kvm_mem_is_private() returns to support running > KVM selftests without needed hardware/platform support. If we don't > take care to skip this check where the above fault_is_private() hook > returns 1, then it ends up breaking SNP in cases where the kernel has > been compiled with CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, since SNP > relies on the page fault flags to make this determination, not > kvm_mem_is_private(), which normally only tracks the memory attributes > set by userspace via KVM_SET_MEMORY_ATTRIBUTES ioctl. Some of that explanation belongs into the commit message, which is a bit lacking...
On Fri, Jan 13, 2023, Borislav Petkov wrote: > On Wed, Jan 04, 2023 at 08:42:56PM -0600, Michael Roth wrote: > > Obviously I need to add some proper documentation for this, but a 1 > > return basically means 'private_fault' pass-by-ref arg has been set > > with the appropriate value, whereas 0 means "there's no platform-specific > > handling for this, so if you have some generic way to determine this > > then use that instead". > > Still binary, tho, and can be bool, right? > > I.e., you can just as well do: > > if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault)) > goto out; > > at the call site. Ya. Don't spend too much time trying to make this look super pretty though, there are subtle bugs inherited from the base UPM series that need to be sorted out and will impact this code. E.g. invoking kvm_mem_is_private() outside of the protection of mmu_invalidate_seq means changes to the attributes may not be reflected in the page tables. I'm also hoping we can avoid a callback entirely, though that may prove to be more pain than gain. I'm poking at the UPM and testing series right now, will circle back to this and TDX in a few weeks to see if there's a sane way to communicate shared vs. private without having to resort to a callback, and without having races between page faults, KVM_SET_MEMORY_ATTRIBUTES, and KVM_SET_USER_MEMORY_REGION2. > > This is mainly to handle CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, which > > just parrots whatever kvm_mem_is_private() returns to support running > > KVM selftests without needed hardware/platform support. 
If we don't > > take care to skip this check where the above fault_is_private() hook > > returns 1, then it ends up breaking SNP in cases where the kernel has > > been compiled with CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, since SNP > > relies on the page fault flags to make this determination, not > > kvm_mem_is_private(), which normally only tracks the memory attributes > > set by userspace via KVM_SET_MEMORY_ATTRIBUTES ioctl. > > Some of that explanation belongs into the commit message, which is a bit > lacking... I'll circle back to this too when I give this series (and TDX) a proper look, there's got to be a better way to handle this.
On Fri, Jan 13, 2023 at 03:48:59PM +0000, Sean Christopherson wrote: > Ya. Don't spend too much time trying to make this look super pretty though, there > are subtle bugs inherited from the base UPM series that need to be sorted out and > will impact this code. Yeah, I'm simply trying to find my way around the code and no better way than reviewing it. But thanks for the heads up. > I'll circle back to this too when I give this series (and TDX) a proper look, > there's got too be a better way to handle this. Good. Thx.
On Fri, Jan 13, 2023 at 03:48:59PM +0000, Sean Christopherson wrote: > On Fri, Jan 13, 2023, Borislav Petkov wrote: > > On Wed, Jan 04, 2023 at 08:42:56PM -0600, Michael Roth wrote: > > > Obviously I need to add some proper documentation for this, but a 1 > > > return basically means 'private_fault' pass-by-ref arg has been set > > > with the appropriate value, whereas 0 means "there's no platform-specific > > > handling for this, so if you have some generic way to determine this > > > then use that instead". > > > > Still binary, tho, and can be bool, right? > > > > I.e., you can just as well do: > > > > if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault)) > > goto out; > > > > at the call site. > > Ya. Don't spend too much time trying to make this look super pretty though, there > are subtle bugs inherited from the base UPM series that need to be sorted out and > will impact this code. E.g. invoking kvm_mem_is_private() outside of the protection > of mmu_invalidate_seq means changes to the attributes may not be reflected in the > page tables. > > I'm also hoping we can avoid a callback entirely, though that may prove to be > more pain than gain. I'm poking at the UPM and testing series right now, will > circle back to this and TDX in a few weeks to see if there's a sane way to communicate > shared vs. private without having to resort to a callback, and without having > races between page faults, KVM_SET_MEMORY_ATTRIBUTES, and KVM_SET_USER_MEMORY_REGION2. Can circle back on this, but for v8 at least I've kept the callback, but simplified SVM implementation of it so that it's only needed for SNP. For protected-SEV it will fall through to the same generic handling used by UPM self-tests. 
It seems like it's safe to have a callback of that sort here for TDX/SNP (or whatever we end up replacing the callback with), since the #NPF flags themselves won't change based on attribute updates, and the subsequent comparison to kvm_mem_is_private() will happen after mmu_invalidate_seq is logged. But for protected-SEV and UPM selftests the initial kvm_mem_is_private() can become stale vs. the one in __kvm_faultin_pfn(), but it seems like ATM it would only lead to a spurious KVM_EXIT_MEMORY_FAULT, which SEV at least should treat as an implicit page-state change and be able to recover from. But yah, not ideal, and maybe for self-tests that makes it difficult to tell if things are working as expected or not. Maybe we should just skip setting fault->is_private here in the non-TDX/non-SNP cases, and just have some other indicator so it's initialized/ignored in kvm_mem_is_private() later. I think some iterations of UPM did it this way prior to 'is_private' becoming const. > > > > This is mainly to handle CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, which > > > just parrots whatever kvm_mem_is_private() returns to support running > > > KVM selftests without needed hardware/platform support. If we don't > > > take care to skip this check where the above fault_is_private() hook > > > returns 1, then it ends up breaking SNP in cases where the kernel has > > > been compiled with CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, since SNP > > > relies on the page fault flags to make this determination, not > > > kvm_mem_is_private(), which normally only tracks the memory attributes > > > set by userspace via KVM_SET_MEMORY_ATTRIBUTES ioctl. > > > > Some of that explanation belongs into the commit message, which is a bit > > lacking... > > I'll circle back to this too when I give this series (and TDX) a proper look, > there's got too be a better way to handle this. 
> It seems like for SNP/TDX we just need to register the shared/encrypted bits with KVM MMU and let it handle checking the #NPF flags, but can iterate on that for the next spin when we have a better idea what it should look like. -Mike
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index f530a550c092..efae987cdce0 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -132,6 +132,7 @@ KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled); +KVM_X86_OP_OPTIONAL_RET0(fault_is_private); #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9317abffbf68..92539708f062 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1636,6 +1636,7 @@ struct kvm_x86_ops { void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level); int (*private_mem_enabled)(struct kvm *kvm); + int (*fault_is_private)(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault); bool (*has_wbinvd_exit)(void); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b3ffc61c668c..61a7c221b966 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5646,8 +5646,7 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err } if (r == RET_PF_INVALID) { - r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, - lower_32_bits(error_code), false); + r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false); if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm)) return -EIO; } diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index e2f508db0b6e..04ea8da86510 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -230,6 +230,38 @@ struct kvm_page_fault { int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); +static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err) +{ + struct kvm_memory_slot *slot; + bool private_fault = false; + gfn_t gfn = gpa_to_gfn(gpa); + + slot = gfn_to_memslot(kvm, 
gfn); + if (!slot) { + pr_debug("%s: no slot, GFN: 0x%llx\n", __func__, gfn); + goto out; + } + + if (!kvm_slot_can_be_private(slot)) { + pr_debug("%s: slot is not private, GFN: 0x%llx\n", __func__, gfn); + goto out; + } + + if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault) == 1) + goto out; + + /* + * Handling below is for UPM self-tests and guests that use + * slot->shared_bitmap for encrypted access tracking. + */ + if (IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING)) + private_fault = kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT); + +out: + pr_debug("%s: GFN: 0x%llx, private: %d\n", __func__, gfn, private_fault); + return private_fault; +} + /* * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(), * and of course kvm_mmu_do_page_fault(). @@ -261,13 +293,13 @@ enum { }; static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - u32 err, bool prefetch) + u64 err, bool prefetch) { bool is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault); struct kvm_page_fault fault = { .addr = cr2_or_gpa, - .error_code = err, + .error_code = lower_32_bits(err), .exec = err & PFERR_FETCH_MASK, .write = err & PFERR_WRITE_MASK, .present = err & PFERR_PRESENT_MASK, @@ -281,8 +313,8 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, .max_level = KVM_MAX_HUGEPAGE_LEVEL, .req_level = PG_LEVEL_4K, .goal_level = PG_LEVEL_4K, - .is_private = IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING) && is_tdp && - kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT), + .is_private = is_tdp && kvm_mmu_fault_is_private(vcpu->kvm, + cr2_or_gpa, err), }; int r;
This callback is used by the KVM MMU to check whether a #NPF was for a private GPA or not. Signed-off-by: Michael Roth <michael.roth@amd.com> --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/mmu/mmu.c | 3 +-- arch/x86/kvm/mmu/mmu_internal.h | 40 +++++++++++++++++++++++++++--- 4 files changed, 39 insertions(+), 6 deletions(-)