
[RFC,V2,3/9] mm: batch folio copying during migration

Message ID 20250319192211.10092-4-shivankg@amd.com (mailing list archive)
State New
Series Enhancements to Page Migration with Multi-threading and Batch Offloading to DMA

Commit Message

Shivank Garg March 19, 2025, 7:22 p.m. UTC
Introduce folios_copy() and folios_mc_copy() to copy folio contents from a
list of src folios to a list of dst folios.

This is a preparatory patch for batch page migration offloading.

Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 include/linux/mm.h |  4 ++++
 mm/util.c          | 41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)
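
As a rough usage sketch (not part of this patch): a batched migration caller
walks both lists through a single call rather than copying folio by folio.
The wrapper name migrate_folios_batch_copy() below is hypothetical, as is its
error handling; it only illustrates the intended calling convention.

#include <linux/list.h>
#include <linux/mm.h>

/* Hypothetical caller: copy an entire batch, bailing out on the first failure. */
static int migrate_folios_batch_copy(struct list_head *dst_folios,
				     struct list_head *src_folios,
				     int nr_folios)
{
	int rc;

	/*
	 * folios_copy()/folios_mc_copy() assume both lists are non-empty
	 * and contain the same number of folios; they do not re-check.
	 */
	rc = folios_mc_copy(dst_folios, src_folios, nr_folios);
	if (rc)
		return rc;	/* copy aborted, e.g. by a machine check */

	return 0;
}

folios_copy() is the non-checking counterpart: it returns void, so callers
that do not need machine-check recovery simply call it and continue.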

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8483e09aeb2c..612cba3d3dac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1301,7 +1301,11 @@  void __folio_put(struct folio *folio);
 
 void split_page(struct page *page, unsigned int order);
 void folio_copy(struct folio *dst, struct folio *src);
+void folios_copy(struct list_head *dst_list, struct list_head *src_list,
+		 int __maybe_unused folios_cnt);
 int folio_mc_copy(struct folio *dst, struct folio *src);
+int folios_mc_copy(struct list_head *dst_list, struct list_head *src_list,
+		 int __maybe_unused folios_cnt);
 
 unsigned long nr_free_buffer_pages(void);
 
diff --git a/mm/util.c b/mm/util.c
index 8c965474d329..5d00d4c5b2dd 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -908,6 +908,47 @@  int folio_mc_copy(struct folio *dst, struct folio *src)
 }
 EXPORT_SYMBOL(folio_mc_copy);
 
+/**
+ * folios_copy - Copy the contents of a list of folios.
+ * @dst_list: Folios to copy to.
+ * @src_list: Folios to copy from.
+ *
+ * The folio contents are copied from @src_list to @dst_list. The caller must
+ * ensure that neither list is empty and that both lists contain the same
+ * number of folios. This may sleep.
+ */
+void folios_copy(struct list_head *dst_list, struct list_head *src_list,
+		 int __maybe_unused folios_cnt)
+{
+	struct folio *src, *dst;
+
+	dst = list_first_entry(dst_list, struct folio, lru);
+	list_for_each_entry(src, src_list, lru) {
+		cond_resched();
+		folio_copy(dst, src);
+		dst = list_next_entry(dst, lru);
+	}
+}
+
+int folios_mc_copy(struct list_head *dst_list, struct list_head *src_list,
+		 int __maybe_unused folios_cnt)
+{
+	struct folio *src, *dst;
+	int ret;
+
+	dst = list_first_entry(dst_list, struct folio, lru);
+	list_for_each_entry(src, src_list, lru) {
+		cond_resched();
+		ret = folio_mc_copy(dst, src);
+		if (ret)
+			return ret;
+		dst = list_next_entry(dst, lru);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(folios_mc_copy);
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 int sysctl_overcommit_ratio __read_mostly = 50;
 unsigned long sysctl_overcommit_kbytes __read_mostly;
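
Since validation is left to the caller, the two lists have to be assembled in
lockstep before either helper runs. A minimal sketch of that precondition; the
helper queue_folio_for_copy() is hypothetical and not part of this series.

#include <linux/list.h>
#include <linux/mm.h>

/* Hypothetical: pair up one source folio with its destination folio. */
static void queue_folio_for_copy(struct folio *src, struct folio *dst,
				 struct list_head *src_folios,
				 struct list_head *dst_folios,
				 int *nr_folios)
{
	/* Add to both lists in lockstep so they keep the same length. */
	list_add_tail(&src->lru, src_folios);
	list_add_tail(&dst->lru, dst_folios);
	(*nr_folios)++;
}

Once every pair is queued, a single folios_copy(&dst_folios, &src_folios,
nr_folios) call copies the whole batch. The count argument is unused by these
loop-based implementations (hence __maybe_unused); presumably it becomes
meaningful once the copy is handed off to a DMA engine or worker threads later
in the series.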