@@ -535,6 +535,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
 {
 	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
+	pgoff_t huge_index;
 	struct kvm_gmem *gmem;
 	struct folio *folio;
 	struct page *page;
@@ -574,13 +575,12 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 		goto success;
 
 	/*
-	 * For simplicity, allow mapping a hugepage if and only if the entire
-	 * binding is compatible, i.e. don't bother supporting mapping interior
-	 * sub-ranges with hugepages (unless userspace comes up with a *really*
-	 * strong use case for needing hugepages within unaligned bindings).
+	 * Only report the true order of the backing folio if it is fully
+	 * contained by the range this GFN's memslot is bound to.
 	 */
-	if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
-	    !IS_ALIGNED(slot->npages, 1ull << *max_order))
+	huge_index = ALIGN(index, 1ull << *max_order);
+	if (huge_index < ALIGN(slot->gmem.pgoff, 1ull << *max_order) ||
+	    huge_index + (1ull << *max_order) > slot->gmem.pgoff + slot->npages)
 		*max_order = 0;
 success:
 	r = 0;
Rather than requiring an entire memslot's gmem binding to be
hugepage-aligned to make use of hugepages, relax the check to simply
ensure that a large folio is completely contained by the range the
memslot is bound to. Otherwise, userspace components like QEMU may
inadvertently disable the use of hugepages depending on how they handle
splitting up regions of guest memory for legacy regions, ROMs, etc.

Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 virt/kvm/guest_memfd.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
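For illustration only (not part of the patch): a minimal userspace
sketch of the old and new checks, assuming an invented binding
(pgoff = 0x100, npages = 0x300), an order-9 (2MB) folio, and a fault
index sitting on the folio boundary. IS_ALIGNED()/ALIGN() are local
stand-ins for the kernel macros. It shows the kind of layout the QEMU
splitting described above can produce: a 2MB folio fully contained in a
binding whose start and size are not 2MB-aligned, which the old check
rejected but the relaxed check accepts.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* Invented binding: starts at page 0x100, spans 0x300 pages. */
		uint64_t pgoff = 0x100, npages = 0x300;
		uint64_t nr = 1ull << 9;	/* order-9 folio: 512 4K pages */
		uint64_t index = 0x200;		/* fault index at folio boundary */

		/* Old check: the entire binding must be hugepage-aligned. */
		bool old_ok = IS_ALIGNED(pgoff, nr) && IS_ALIGNED(npages, nr);

		/* New check: the folio itself must fit inside the binding. */
		uint64_t huge_index = ALIGN(index, nr);
		bool new_ok = huge_index >= ALIGN(pgoff, nr) &&
			      huge_index + nr <= pgoff + npages;

		printf("old check keeps hugepage: %d\n", old_ok);	/* 0 */
		printf("new check keeps hugepage: %d\n", new_ok);	/* 1 */
		return 0;
	}

The sketch inverts the patch's early-out form: allowing the hugepage iff
the folio is contained is equivalent to zeroing *max_order iff it is not.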