
[RFC,3/6] mm: Introduce try_vma_alloc_zeroed_movable_folio()

Message ID 20230317105802.2634004-4-ryan.roberts@arm.com (mailing list archive)
State New, archived
Series: variable-order, large folios for anonymous memory

Commit Message

Ryan Roberts March 17, 2023, 10:57 a.m. UTC
Like vma_alloc_zeroed_movable_folio(), except it will opportunistically
attempt to allocate high-order folios, retrying with lower orders all
the way to order-0, until success. The user must check what they got
with folio_order().

This will be used to opportunistically allocate large folios for
anonymous memory with a sensible fallback under pressure.

For attempts to allocate non-zero orders, we set __GFP_NORETRY (and
__GFP_NOWARN) to avoid high allocation latency due to reclaim, instead
preferring to just try for a lower order; the final order-0 attempt uses
the default flags, as before. The same approach is used by the readahead
code when allocating large folios.
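
For illustration, a caller is expected to look something like the sketch
below. Only try_vma_alloc_zeroed_movable_folio() and folio_order() are
from this series; map_zeroed_range() and the rest are made up for the
example:

	static int map_zeroed_range(struct vm_area_struct *vma,
				    unsigned long addr)
	{
		struct folio *folio;
		int order;

		/* Ask for up to order-3; may get less under pressure. */
		folio = try_vma_alloc_zeroed_movable_folio(vma, addr, 3);
		if (!folio)
			return -ENOMEM;

		/* Size the mapping by what was actually allocated. */
		order = folio_order(folio);

		/* ... map 1 << order ptes starting at addr ... */

		return 0;
	}

Note that both call sites below still pass order 0, so the fallback loop
is skipped and behaviour is unchanged by this patch on its own;
presumably a later patch in the series passes a non-zero order.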

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 mm/memory.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

--
2.25.1

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 8798da968686..c9e09415ee18 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3024,6 +3024,27 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	count_vm_event(PGREUSE);
 }

+/*
+ * Opportunistically attempt to allocate high-order folios, retrying with lower
+ * orders all the way to order-0, until success. The user must check what they
+ * got with folio_order().
+ */
+static struct folio *try_vma_alloc_zeroed_movable_folio(
+						struct vm_area_struct *vma,
+						unsigned long vaddr, int order)
+{
+	struct folio *folio;
+	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+
+	for (; order > 0; order--) {
+		folio = vma_alloc_zeroed_movable_folio(vma, vaddr, gfp, order);
+		if (folio)
+			return folio;
+	}
+
+	return vma_alloc_zeroed_movable_folio(vma, vaddr, 0, 0);
+}
+
 /*
  * Handle the case of a page which we actually need to copy to a new page,
  * either due to COW or unsharing.
@@ -3061,8 +3082,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		goto oom;

 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address,
-									0, 0);
+		new_folio = try_vma_alloc_zeroed_movable_folio(vma,
+							vmf->address, 0);
 		if (!new_folio)
 			goto oom;
 	} else {
@@ -4050,7 +4071,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Allocate our own private page. */
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address, 0, 0);
+	folio = try_vma_alloc_zeroed_movable_folio(vma, vmf->address, 0);
 	if (!folio)
 		goto oom;