
[v2,16/17] khugepaged: Implement strict policy for mTHP collapse

Message ID 20250211111326.14295-17-dev.jain@arm.com (mailing list archive)
State New
Series khugepaged: Asynchronous mTHP collapse

Commit Message

Dev Jain Feb. 11, 2025, 11:13 a.m. UTC
As noted in the discussion thread ending at [1], avoid the creep problem by
collapsing to mTHPs only if max_ptes_none is either 0 or 511, i.e.
HPAGE_PMD_NR - 1. Along with this, make the mTHP collapse conditions
stricter: drop the order-based scaling of max_ptes_shared and max_ptes_swap,
and consider an mTHP collapse only if the range contains no shared and no
swap PTEs.

[1] https://lore.kernel.org/all/8114d47b-b383-4d6e-ab65-a0e88b99c873@arm.com/
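
For illustration only (this is not part of the patch and not kernel code),
the standalone sketch below models the creep problem that the new policy
rules out: with an order-scaled max_ptes_none and an intermediate tuning,
every successful mTHP collapse fills the empty PTEs it covers, which can make
the next higher order eligible on a later scan. The values used here
(max_ptes_none = 256, two initially populated PTEs) and the 4K-page/2M-PMD
geometry (HPAGE_PMD_NR = 512) are assumptions chosen for the example.

/* creep.c - toy model of an order-scaled max_ptes_none (illustration only) */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_ORDER	9
#define HPAGE_PMD_NR	(1 << HPAGE_PMD_ORDER)

int main(void)
{
	/* Example tuning: "at most half of the range may be empty". */
	unsigned int max_ptes_none = 256;
	bool present[HPAGE_PMD_NR] = { false };
	int order = 2;			/* smallest collapse order attempted */

	/* Only two 4K pages of the 2M region are populated initially. */
	present[0] = present[1] = true;

	while (order <= HPAGE_PMD_ORDER) {
		unsigned int nr = 1u << order;
		/* khugepaged scales max_ptes_none down for sub-PMD orders. */
		unsigned int scaled = max_ptes_none >> (HPAGE_PMD_ORDER - order);
		unsigned int none = 0;

		for (unsigned int i = 0; i < nr; i++)
			none += !present[i];

		if (none > scaled)
			break;		/* not eligible; the creep stops here */

		/* "Collapse": every PTE in the range becomes present. */
		for (unsigned int i = 0; i < nr; i++)
			present[i] = true;

		printf("order %d collapsed (%u none <= scaled limit %u)\n",
		       order, none, scaled);
		order++;		/* a later scan can try the next order */
	}
	return 0;
}

Starting from two populated base pages, the loop collapses order 2, 3, ...
up to the full PMD (order 9), even though max_ptes_none = 256 nominally
means "at most half of the PTEs may be empty". With the policy in this
patch, sub-PMD (mTHP) collapse is simply skipped for such intermediate
max_ptes_none values.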

Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 mm/khugepaged.c | 37 ++++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d2bb008b95e7..b589f889bb5a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -417,6 +417,17 @@  static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
 
 static bool thp_enabled(void)
 {
+	bool anon_pmd_enabled = (test_bit(PMD_ORDER, &huge_anon_orders_always) ||
+				 test_bit(PMD_ORDER, &huge_anon_orders_madvise) ||
+				 (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
+				  hugepage_global_enabled()));
+
+	/*
+	 * If PMD_ORDER is ineligible for collapse, check whether the mTHP
+	 * collapse policy is obeyed; see Documentation/admin-guide/transhuge.rst
+	 */
+	bool anon_collapse_mthp = (khugepaged_max_ptes_none == 0 ||
+				   khugepaged_max_ptes_none == HPAGE_PMD_NR - 1);
 	/*
 	 * We cover the anon, shmem and the file-backed case here; file-backed
 	 * hugepages, when configured in, are determined by the global control.
@@ -427,8 +438,9 @@  static bool thp_enabled(void)
 	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 	    hugepage_global_enabled())
 		return true;
-	if (huge_anon_orders_always || huge_anon_orders_madvise ||
-	    (huge_anon_orders_inherit && hugepage_global_enabled()))
+	if ((huge_anon_orders_always || huge_anon_orders_madvise ||
+	    (huge_anon_orders_inherit && hugepage_global_enabled())) &&
+	    (anon_pmd_enabled || anon_collapse_mthp))
 		return true;
 	if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
 		return true;
@@ -578,13 +590,16 @@  static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 	pte_t *_pte;
 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
 	bool writable = false;
-	unsigned int max_ptes_shared = khugepaged_max_ptes_shared >> (HPAGE_PMD_ORDER - order);
+	unsigned int max_ptes_shared = khugepaged_max_ptes_shared;
 	unsigned int max_ptes_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
 	bool all_pfns_present = true;
 	bool all_pfns_contig = true;
 	bool first_pfn_aligned = true;
 	pte_t prev_pteval;
 
+	if (order != HPAGE_PMD_ORDER)
+		max_ptes_shared = 0;
+
 	for (_pte = pte; _pte < pte + (1UL << order);
 	     _pte++, address += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
@@ -1453,11 +1468,16 @@  static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	order = highest_order(orders);
 	VM_BUG_ON(address & ((PAGE_SIZE << order) - 1));
 
+	max_ptes_none = khugepaged_max_ptes_none;
+	max_ptes_shared = khugepaged_max_ptes_shared;
+	max_ptes_swap = khugepaged_max_ptes_swap;
+
 scan_pte_range:
 
-	max_ptes_shared = khugepaged_max_ptes_shared >> (HPAGE_PMD_ORDER - order);
+	if (order != HPAGE_PMD_ORDER)
+		max_ptes_shared = max_ptes_swap = 0;
+
 	max_ptes_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
-	max_ptes_swap = khugepaged_max_ptes_swap >> (HPAGE_PMD_ORDER - order);
 	referenced = 0, shared = 0, none_or_zero = 0, unmapped = 0;
 	all_pfns_present = true, all_pfns_contig = true, first_pfn_aligned = true;
 
@@ -2651,6 +2671,11 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	int order;
 	bool is_file_vma;
 	int prev_progress = 0;
+	bool collapse_mthp = true;
+
+	/* Avoid the creep problem; see Documentation/admin-guide/transhuge.rst */
+	if (khugepaged_max_ptes_none && khugepaged_max_ptes_none != HPAGE_PMD_NR - 1)
+		collapse_mthp = false;
 
 	VM_BUG_ON(!pages);
 	lockdep_assert_held(&khugepaged_mm_lock);
@@ -2710,6 +2735,8 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			/* select the highest possible order for the VMA */
 			order = highest_order(orders);
 			while (orders) {
+				if (order != HPAGE_PMD_ORDER && !collapse_mthp)
+					goto skip;
 				hend = round_down(vma->vm_end, PAGE_SIZE << order);
 				if (khugepaged_scan.address <= hend)
 					break;