Message ID | 20240726235234.228822-46-seanjc@google.com |
---|---|
State | Superseded |
Series | KVM: Stop grabbing references to PFNMAP'd pages |
On 7/27/24 01:51, Sean Christopherson wrote:
> Provide the "struct page" associated with a guest_memfd pfn as an output
> from __kvm_gmem_get_pfn() so that KVM guest page fault handlers can
       ^^^^^^^^^^^^^^^^^^^^

Just "kvm_gmem_get_pfn()".

> directly put the page instead of having to rely on
> kvm_pfn_to_refcounted_page().

This will conflict with my series, where I'm introducing
folio_file_pfn() and using it here:

> -	page = folio_file_page(folio, index);
> +	*page = folio_file_page(folio, index);
>
> -	*pfn = page_to_pfn(page);
> +	*pfn = page_to_pfn(*page);
>  	if (max_order)
>  		*max_order = 0;

That said, I think it's better to turn kvm_gmem_get_pfn() into
kvm_gmem_get_page() here, and pull the page_to_pfn() or page_to_phys()
to the caller as applicable.  This highlights that the caller always
gets a refcounted page with guest_memfd.

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 901be9e420a4..bcc4a4c594ef 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4348,13 +4348,14 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
 		return -EFAULT;
 	}
 
-	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
+	r = kvm_gmem_get_page(vcpu->kvm, fault->slot, fault->gfn, &fault->refcounted_page,
 			     &max_order);
 	if (r) {
 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
 		return r;
 	}
 
+	fault->pfn = page_to_pfn(page);
 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
 	fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
 							 fault->max_level, max_order);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a16c873b3232..db4181d11f2e 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3847,7 +3847,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 	if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
 		gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
 		struct kvm_memory_slot *slot;
-		kvm_pfn_t pfn;
+		struct page *page;
 
 		slot = gfn_to_memslot(vcpu->kvm, gfn);
 		if (!slot)
@@ -3857,7 +3857,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 		 * The new VMSA will be private memory guest memory, so
 		 * retrieve the PFN from the gmem backend.
 		 */
-		if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, NULL))
+		if (kvm_gmem_get_page(vcpu->kvm, slot, gfn, &page, NULL))
 			return -EINVAL;
 
 		/*
@@ -3873,7 +3873,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 		svm->sev_es.snp_has_guest_vmsa = true;
 
 		/* Use the new VMSA */
-		svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
+		svm->vmcb->control.vmsa_pa = page_to_phys(page);
 
 		/* Mark the vCPU as runnable */
 		vcpu->arch.pv.pv_unhalted = false;
@@ -3886,7 +3886,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 		 * changes then care should be taken to ensure
 		 * svm->sev_es.vmsa is pinned through some other means.
 		 */
-		kvm_release_pfn_clean(pfn);
+		kvm_release_page_clean(page);
 	}
 
 	/*
@@ -4687,6 +4687,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 	struct kvm *kvm = vcpu->kvm;
 	int order, rmp_level, ret;
 	bool assigned;
+	struct page *page;
 	kvm_pfn_t pfn;
 	gfn_t gfn;
 
@@ -4712,13 +4713,14 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 		return;
 	}
 
-	ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &order);
+	ret = kvm_gmem_get_page(kvm, slot, gfn, &page, &order);
 	if (ret) {
 		pr_warn_ratelimited("SEV: Unexpected RMP fault, no backing page for private GPA 0x%llx\n",
 				    gpa);
 		return;
 	}
 
+	pfn = page_to_pfn(page);
 	ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
 	if (ret || !assigned) {
 		pr_warn_ratelimited("SEV: Unexpected RMP fault, no assigned RMP entry found for GPA 0x%llx PFN 0x%llx error %d\n",
@@ -4770,7 +4772,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 out:
 	trace_kvm_rmp_fault(vcpu, gpa, pfn, error_code, rmp_level, ret);
 out_no_trace:
-	put_page(pfn_to_page(pfn));
+	kvm_release_page_unused(page);
 }
 
 static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)

And the change in virt/kvm/guest_memfd.c then is just as trivial, apart
from all the renaming:

-	*pfn = folio_file_pfn(folio, index);
+	*page = folio_file_page(folio, index);

Paolo
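Paolo's diff only shows the call sites, so the helper it implies is never
declared in the thread; read literally, the prototype would be roughly the
following (the exact parameter layout here is an assumption, not a quote):

int kvm_gmem_get_page(struct kvm *kvm, struct kvm_memory_slot *slot,
		      gfn_t gfn, struct page **page, int *max_order);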
On Tue, Jul 30, 2024, Paolo Bonzini wrote:
> On 7/27/24 01:51, Sean Christopherson wrote:
> > Provide the "struct page" associated with a guest_memfd pfn as an output
> > from __kvm_gmem_get_pfn() so that KVM guest page fault handlers can
>        ^^^^^^^^^^^^^^^^^^^^
>
> Just "kvm_gmem_get_pfn()".
>
> > directly put the page instead of having to rely on
> > kvm_pfn_to_refcounted_page().
>
> This will conflict with my series, where I'm introducing
> folio_file_pfn() and using it here:
>
> > -	page = folio_file_page(folio, index);
> > +	*page = folio_file_page(folio, index);
> > -	*pfn = page_to_pfn(page);
> > +	*pfn = page_to_pfn(*page);
> >  	if (max_order)
> >  		*max_order = 0;
>
> That said, I think it's better to turn kvm_gmem_get_pfn() into
> kvm_gmem_get_page() here, and pull the page_to_pfn() or page_to_phys()
> to the caller as applicable.  This highlights that the caller always
> gets a refcounted page with guest_memfd.

I have mixed feelings on this.

On one hand, it's silly/confusing to return a pfn+page pair and thus imply
that guest_memfd can return a pfn without a page.

On the other hand, if guest_memfd does ever serve pfns without a struct page,
it could be quite painful to unwind all of the arch code we'll accrue that
assumes guest_memfd only ever returns a refcounted page (as evidenced by this
series).

The probability of guest_memfd not having struct page for mapped pfns is likely
very low, but at the same time, providing a pfn+page pair doesn't cost us much.
And if it turns out that not having struct page is nonsensical, deferring the
kvm_gmem_get_pfn() => kvm_gmem_get_page() conversion could be annoying, but highly
unlikely to be painful since it should be 100% mechanical.  Whereas reverting back
to kvm_gmem_get_pfn() if we make the wrong decision now could mean doing surgery
on a pile of arch code.
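Concretely, the pfn+page contract being argued for here is the one declared
in the patch further down; a sketch of that declaration with the implied
invariants spelled out as comments (the comments are a gloss, not part of
the patch):

/*
 * For guest_memfd today, *pfn == page_to_pfn(*page) on success and the
 * caller must drop the reference with kvm_release_page_*(); keeping the
 * separate pfn output leaves room for gmem pfns without a struct page.
 */
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
		     int *max_order);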
On 7/30/24 22:00, Sean Christopherson wrote:
> The probability of guest_memfd not having struct page for mapped pfns is likely
> very low, but at the same time, providing a pfn+page pair doesn't cost us much.
> And if it turns out that not having struct page is nonsensical, deferring the
> kvm_gmem_get_pfn() => kvm_gmem_get_page() conversion could be annoying, but highly
> unlikely to be painful since it should be 100% mechanical.  Whereas reverting back
> to kvm_gmem_get_pfn() if we make the wrong decision now could mean doing surgery
> on a pile of arch code.

Ok, fair enough.  The conflict resolution is trivial either way (I also
checked the TDX series and miraculously it has only one conflict which is
also trivial).

Paolo
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 53555ea5e5bb..146e57c9c86d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4353,7 +4353,7 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
 	}
 
 	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
-			     &max_order);
+			     &fault->refcounted_page, &max_order);
 	if (r) {
 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
 		return r;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 62f63fd714df..5c125e4c1096 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3847,6 +3847,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 	if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
 		gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
 		struct kvm_memory_slot *slot;
+		struct page *page;
 		kvm_pfn_t pfn;
 
 		slot = gfn_to_memslot(vcpu->kvm, gfn);
@@ -3857,7 +3858,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 		 * The new VMSA will be private memory guest memory, so
 		 * retrieve the PFN from the gmem backend.
 		 */
-		if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, NULL))
+		if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
 			return -EINVAL;
 
 		/*
@@ -3886,7 +3887,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 		 * changes then care should be taken to ensure
 		 * svm->sev_es.vmsa is pinned through some other means.
 		 */
-		kvm_release_pfn_clean(pfn);
+		kvm_release_page_clean(page);
 	}
 
 	/*
@@ -4686,6 +4687,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 	struct kvm_memory_slot *slot;
 	struct kvm *kvm = vcpu->kvm;
 	int order, rmp_level, ret;
+	struct page *page;
 	bool assigned;
 	kvm_pfn_t pfn;
 	gfn_t gfn;
@@ -4712,7 +4714,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 		return;
 	}
 
-	ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &order);
+	ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &page, &order);
 	if (ret) {
 		pr_warn_ratelimited("SEV: Unexpected RMP fault, no backing page for private GPA 0x%llx\n",
 				    gpa);
@@ -4770,7 +4772,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 out:
 	trace_kvm_rmp_fault(vcpu, gpa, pfn, error_code, rmp_level, ret);
 out_no_trace:
-	put_page(pfn_to_page(pfn));
+	kvm_release_page_unused(page);
 }
 
 static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e0548ae92659..9d2a97eb30e4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2462,11 +2462,13 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
 
 #ifdef CONFIG_KVM_PRIVATE_MEM
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+		     gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+		     int *max_order);
 #else
 static inline int kvm_gmem_get_pfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn,
-				   kvm_pfn_t *pfn, int *max_order)
+				   kvm_pfn_t *pfn, struct page **page,
+				   int *max_order)
 {
 	KVM_BUG_ON(1, kvm);
 	return -EIO;
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 1c509c351261..ad1f9e73cd13 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -542,12 +542,12 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 }
 
 static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
-			      gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
+			      gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+			      int *max_order, bool prepare)
 {
 	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
 	struct kvm_gmem *gmem = file->private_data;
 	struct folio *folio;
-	struct page *page;
 	int r;
 
 	if (file != slot->gmem.file) {
@@ -571,9 +571,9 @@ static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
 		return -EHWPOISON;
 	}
 
-	page = folio_file_page(folio, index);
+	*page = folio_file_page(folio, index);
 
-	*pfn = page_to_pfn(page);
+	*pfn = page_to_pfn(*page);
 	if (max_order)
 		*max_order = 0;
 
@@ -585,7 +585,8 @@ static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
 }
 
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+		     gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+		     int *max_order)
 {
 	struct file *file = kvm_gmem_get_file(slot);
 	int r;
@@ -593,7 +594,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	if (!file)
 		return -EFAULT;
 
-	r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true);
+	r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, page, max_order, true);
 	fput(file);
 	return r;
 }
@@ -604,6 +605,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 {
 	struct file *file;
 	struct kvm_memory_slot *slot;
+	struct page *page;
 	void __user *p;
 
 	int ret = 0, max_order;
@@ -633,7 +635,8 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 			break;
 		}
 
-		ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order, false);
+		ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &page,
+					 &max_order, false);
 		if (ret)
 			break;
 
@@ -644,7 +647,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 		p = src ? src + i * PAGE_SIZE : NULL;
 		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
 
-		put_page(pfn_to_page(pfn));
+		put_page(page);
 		if (ret)
 			break;
 	}
Provide the "struct page" associated with a guest_memfd pfn as an output
from __kvm_gmem_get_pfn() so that KVM guest page fault handlers can
directly put the page instead of having to rely on
kvm_pfn_to_refcounted_page().

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c   |  2 +-
 arch/x86/kvm/svm/sev.c   | 10 ++++++----
 include/linux/kvm_host.h |  6 ++++--
 virt/kvm/guest_memfd.c   | 19 +++++++++++--------
 4 files changed, 22 insertions(+), 15 deletions(-)
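From a caller's point of view, the resulting pattern is the one visible in
the sev.c hunk above: fetch the pfn together with its backing page, use the
pfn, then drop the reference through the page. A minimal sketch with error
handling elided; use_the_pfn() is a stand-in for whatever the caller does
with the pfn:

	struct page *page;
	kvm_pfn_t pfn;

	/* Both outputs refer to the same refcounted guest_memfd page. */
	if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
		return -EINVAL;

	use_the_pfn(pfn);

	/* Release the reference taken by kvm_gmem_get_pfn(). */
	kvm_release_page_clean(page);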