Message ID | 20240711222755.57476-9-pbonzini@redhat.com (mailing list archive)
State      | New, archived
Series     | KVM: guest_memfd: lazy preparation of pages + prefault support for SEV-SNP
On Thu, Jul 11, 2024 at 06:27:51PM -0400, Paolo Bonzini wrote:
> It is enough to return 0 if a guest need not do any preparation.
> This is in fact how sev_gmem_prepare() works for non-SNP guests,
> and it extends naturally to Intel hosts: the x86 callback for
> gmem_prepare is optional and returns 0 if not defined.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/x86.c       |  5 -----
>  include/linux/kvm_host.h |  1 -
>  virt/kvm/guest_memfd.c   | 13 +++----------
>  3 files changed, 3 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a1c85591f92c..4f58423c6148 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -13604,11 +13604,6 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
>  EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
>
>  #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
> -bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
> -{
> -	return kvm->arch.vm_type == KVM_X86_SNP_VM;
> -}
> -
>  int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
>  {
>  	return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index eb8404e9aa03..f6e11991442d 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -2443,7 +2443,6 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
>
>  #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
>  int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
> -bool kvm_arch_gmem_prepare_needed(struct kvm *kvm);
>  #endif
>
>  /**
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index f4d82719ec19..509360eefea5 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -29,16 +29,9 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
>  				    pgoff_t index, struct folio *folio)
>  {
>  #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
> -	kvm_pfn_t pfn;
> -	gfn_t gfn;
> -	int rc;
> -
> -	if (!kvm_arch_gmem_prepare_needed(kvm))
> -		return 0;
> -
> -	pfn = folio_file_pfn(folio, index);
> -	gfn = slot->base_gfn + index - slot->gmem.pgoff;
> -	rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
> +	kvm_pfn_t pfn = folio_file_pfn(folio, index);
> +	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
> +	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));

Looks like this hunk was meant to be part of a different patch. Otherwise:

Reviewed-by: Michael Roth <michael.roth@amd.com>

-Mike

>  	if (rc) {
>  		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
>  				    index, gfn, pfn, rc);
> --
> 2.43.0
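The behaviour the commit message leans on is that the SEV callback already bails out for guests that need no preparation. A minimal sketch of that pattern follows; it is not the exact arch/x86/kvm/svm/sev.c code, and the body after the early return is only indicative:

/*
 * Illustrative sketch only: the real sev_gmem_prepare() does the
 * SNP-specific RMP work that is elided here.
 */
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	if (!sev_snp_guest(kvm))
		return 0;	/* non-SNP guests need no preparation */

	/* ... SNP-specific preparation of the pfn range (RMP updates) ... */
	return 0;
}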
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1c85591f92c..4f58423c6148 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13604,11 +13604,6 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
-bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
-{
-	return kvm->arch.vm_type == KVM_X86_SNP_VM;
-}
-
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
 {
 	return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eb8404e9aa03..f6e11991442d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2443,7 +2443,6 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
-bool kvm_arch_gmem_prepare_needed(struct kvm *kvm);
 #endif
 
 /**
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index f4d82719ec19..509360eefea5 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -29,16 +29,9 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
 				    pgoff_t index, struct folio *folio)
 {
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
-	kvm_pfn_t pfn;
-	gfn_t gfn;
-	int rc;
-
-	if (!kvm_arch_gmem_prepare_needed(kvm))
-		return 0;
-
-	pfn = folio_file_pfn(folio, index);
-	gfn = slot->base_gfn + index - slot->gmem.pgoff;
-	rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
+	kvm_pfn_t pfn = folio_file_pfn(folio, index);
+	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
+	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
 	if (rc) {
 		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
 				    index, gfn, pfn, rc);
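For reference, with the guest_memfd.c hunk applied the prepare helper looks roughly like the sketch below. It is reconstructed from the diff context above; the error/return tail after the pr_warn_ratelimited() falls outside the hunk, so those return paths are assumed:

static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				    pgoff_t index, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
	kvm_pfn_t pfn = folio_file_pfn(folio, index);
	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));

	if (rc) {
		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
				    index, gfn, pfn, rc);
		return rc;	/* assumed: not visible in the hunk */
	}
#endif

	return 0;		/* assumed: not visible in the hunk */
}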
It is enough to return 0 if a guest need not do any preparation.
This is in fact how sev_gmem_prepare() works for non-SNP guests,
and it extends naturally to Intel hosts: the x86 callback for
gmem_prepare is optional and returns 0 if not defined.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/x86.c       |  5 -----
 include/linux/kvm_host.h |  1 -
 virt/kvm/guest_memfd.c   | 13 +++----------
 3 files changed, 3 insertions(+), 16 deletions(-)
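The "optional and returns 0 if not defined" part refers to KVM's x86 op dispatch; the real path is the static_call(kvm_x86_gmem_prepare) shown in the x86.c hunk. Open-coded purely for exposition (the helper name and the direct kvm_x86_ops access below are not the actual dispatch mechanism), the behaviour is equivalent to:

/*
 * Illustration only: when the vendor module leaves the hook unset
 * (e.g. VMX today), the prepare step degenerates to "return 0".
 */
static int gmem_prepare_or_zero(struct kvm *kvm, kvm_pfn_t pfn,
				gfn_t gfn, int max_order)
{
	if (!kvm_x86_ops.gmem_prepare)
		return 0;

	return kvm_x86_ops.gmem_prepare(kvm, pfn, gfn, max_order);
}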