diff mbox series

[v3,3/4] huge_page: allow arch override for folio_zero_user()

Message ID 20250414034607.762653-4-ankur.a.arora@oracle.com (mailing list archive)
State New
Headers show
Series mm/folio_zero_user: add multi-page clearing | expand

Commit Message

Ankur Arora April 14, 2025, 3:46 a.m. UTC
folio_zero_user() is constrained to operate in a page-at-a-time fashion
because it needs to handle the CONFIG_HIGHMEM case. Additionally,
cooperative preemption models (none, voluntary) need regular
invocations of cond_resched(), which limits the chunk size when zeroing.

Move the page-at-a-time handling to __folio_zero_user(), and allow an
architecture-specific override. Note that when running under
CONFIG_PREEMPT_DYNAMIC, we could switch between cooperative and
preemptible models at runtime, falling back to __folio_zero_user()
when running under a cooperative model.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 mm/memory.c | 38 +++++++++++++++++++++++++++++++++-----
 1 file changed, 33 insertions(+), 5 deletions(-)
diff mbox series

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 2d8c265fc7d6..ac6a19d7bdf4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -7235,6 +7235,32 @@  static int clear_subpage(unsigned long addr, int idx, void *arg)
 	return 0;
 }
 
+/*
+ * __folio_zero_user - page-at-a-time zeroing.
+ *
+ * Handle cases where we have nothing better available. This could be
+ * for a few reasons:
+ *
+ *   - the architecture does not support multi-page zeroing (no override
+ *     for folio_zero_user_preemptible()): because there might be no
+ *     optimized zeroing primitive, or because CONFIG_HIGHMEM is enabled.
+ *
+ *   - !preempt_model_preemptible(): need to call cond_resched()
+ *     periodically to provide reasonable latency.
+ */
+static void __folio_zero_user(struct folio *folio, unsigned long addr_hint)
+{
+	unsigned int nr_pages = folio_nr_pages(folio);
+
+	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+		clear_gigantic_page(folio, addr_hint, nr_pages);
+	else
+		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
+}
+
+void __weak folio_zero_user_preemptible(struct folio *, unsigned long)
+	__alias(__folio_zero_user);
+
 /**
  * folio_zero_user - Zero a folio which will be mapped to userspace.
  * @folio: The folio to zero.
@@ -7242,12 +7268,14 @@  static int clear_subpage(unsigned long addr, int idx, void *arg)
  */
 void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 {
-	unsigned int nr_pages = folio_nr_pages(folio);
-
-	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
-		clear_gigantic_page(folio, addr_hint, nr_pages);
+	/*
+	 * Use the arch optimized version if we are preemptible and can
+	 * do zeroing of extended extents without worrying about latency.
+	 */
+	if (preempt_model_preemptible())
+		folio_zero_user_preemptible(folio, addr_hint);
 	else
-		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
+		__folio_zero_user(folio, addr_hint);
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,