
[RFC,v2,09/17] mm: Update wp_page_reuse() to operate on range of pages

Message ID 20230414130303.2345383-10-ryan.roberts@arm.com (mailing list archive)
State New, archived
Series variable-order, large folios for anonymous memory

Commit Message

Ryan Roberts April 14, 2023, 1:02 p.m. UTC
We will shortly be updating do_wp_page() to be able to reuse a range of
pages from a large anon folio. As an enabling step, modify
wp_page_reuse() to operate on a range of pages, if a struct
anon_folio_range is passed in. Batching in this way allows the cache
maintenance and event counting to be done once per range rather than
once per page, for a small performance improvement.

Currently all call sites pass range=NULL, so no functional change is
intended.

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 mm/memory.c | 80 +++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 60 insertions(+), 20 deletions(-)

--
2.25.1
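
For illustration only, not part of the patch: a hedged sketch of how a
later caller, such as the do_wp_page() update mentioned in the commit
message, might fill in a struct anon_folio_range and invoke the batched
wp_page_reuse(). The helper name and the assumption that vmf->address,
vmf->pte and vmf->page already refer to the first page of the folio are
this example's own, not the series'.

/*
 * Hypothetical caller sketch, assuming the PTE lock is held and that
 * vmf->address, vmf->pte and vmf->page all refer to the first page of
 * the large anon folio. Not code from this series.
 */
static void example_reuse_whole_folio(struct vm_fault *vmf,
				      struct folio *folio)
{
	struct anon_folio_range range = {
		.va_start = vmf->address,
		.pte_start = vmf->pte,
		.pg_start = vmf->page,
		.nr = folio_nr_pages(folio),
		.exclusive = true,
	};

	/* Releases vmf->ptl; flushes the cache and counts PGREUSE once per range. */
	wp_page_reuse(vmf, &range);
}

With range=NULL the function behaves exactly as before, which is why the
existing call sites in this patch change only by gaining the extra NULL
argument.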

Patch

diff --git a/mm/memory.c b/mm/memory.c
index f92a28064596..83835ff5a818 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3030,6 +3030,14 @@  static inline int max_anon_folio_order(struct vm_area_struct *vma)
 	return ANON_FOLIO_ORDER_MAX;
 }

+struct anon_folio_range {
+	unsigned long va_start;
+	pte_t *pte_start;
+	struct page *pg_start;
+	int nr;
+	bool exclusive;
+};
+
 /*
  * Returns index of first pte that is not none, or nr if all are none.
  */
@@ -3122,31 +3130,63 @@  static int calc_anon_folio_order_alloc(struct vm_fault *vmf, int order)
  * case, all we need to do here is to mark the page as writable and update
  * any related book-keeping.
  */
-static inline void wp_page_reuse(struct vm_fault *vmf)
+static inline void wp_page_reuse(struct vm_fault *vmf,
+					struct anon_folio_range *range)
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *page = vmf->page;
+	unsigned long addr;
+	pte_t *pte;
+	struct page *page;
+	int nr;
 	pte_t entry;
+	int change = 0;
+	int i;

 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
-	VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));

-	/*
-	 * Clear the pages cpupid information as the existing
-	 * information potentially belongs to a now completely
-	 * unrelated process.
-	 */
-	if (page)
-		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
+	if (range) {
+		addr = range->va_start;
+		pte = range->pte_start;
+		page = range->pg_start;
+		nr = range->nr;
+	} else {
+		addr = vmf->address;
+		pte = vmf->pte;
+		page = vmf->page;
+		nr = 1;
+	}
+
+	if (page) {
+		for (i = 0; i < nr; i++, page++) {
+			VM_BUG_ON(PageAnon(page) && !PageAnonExclusive(page));
+
+			/*
+			 * Clear the pages cpupid information as the existing
+			 * information potentially belongs to a now completely
+			 * unrelated process.
+			 */
+			page_cpupid_xchg_last(page,
+					(1 << LAST_CPUPID_SHIFT) - 1);
+		}
+	}
+
+	flush_cache_range(vma, addr, addr + (nr << PAGE_SHIFT));
+
+	for (i = 0; i < nr; i++) {
+		entry = pte_mkyoung(pte[i]);
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		change |= ptep_set_access_flags(vma,
+					addr + (i << PAGE_SHIFT),
+					pte + i,
+					entry, 1);
+	}
+
+	if (change)
+		update_mmu_cache_range(vma, addr, pte, nr);

-	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
-	entry = pte_mkyoung(vmf->orig_pte);
-	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
-		update_mmu_cache(vma, vmf->address, vmf->pte);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	count_vm_event(PGREUSE);
+	count_vm_events(PGREUSE, nr);
 }

 /*
@@ -3359,7 +3399,7 @@  vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return VM_FAULT_NOPAGE;
 	}
-	wp_page_reuse(vmf);
+	wp_page_reuse(vmf, NULL);
 	return 0;
 }

@@ -3381,7 +3421,7 @@  static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
 			return ret;
 		return finish_mkwrite_fault(vmf);
 	}
-	wp_page_reuse(vmf);
+	wp_page_reuse(vmf, NULL);
 	return 0;
 }

@@ -3410,7 +3450,7 @@  static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 			return tmp;
 		}
 	} else {
-		wp_page_reuse(vmf);
+		wp_page_reuse(vmf, NULL);
 		lock_page(vmf->page);
 	}
 	ret |= fault_dirty_shared_page(vmf);
@@ -3534,7 +3574,7 @@  static vm_fault_t do_wp_page(struct vm_fault *vmf)
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			return 0;
 		}
-		wp_page_reuse(vmf);
+		wp_page_reuse(vmf, NULL);
 		return 0;
 	}
 copy: