new file mode 100644
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _MIGRATE_DMA_H
+#define _MIGRATE_DMA_H
+#include <linux/migrate_mode.h>
+#include <linux/mutex.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+
+struct folio;
+struct module;
+
+#define MIGRATOR_NAME_LEN 32
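+
+/*
+ * A page-copy offload provider.  @migrate_dma copies the folios on
+ * @src_list into the matching folios on @dst_list, @can_migrate_dma lets
+ * the provider refuse individual folio pairs, @srcu_head is used to queue
+ * the SRCU callback when the provider changes, and @owner pins the
+ * providing module while it is active.
+ */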
+struct migrator {
+ char name[MIGRATOR_NAME_LEN];
+ void (*migrate_dma)(struct list_head *dst_list, struct list_head *src_list);
+ bool (*can_migrate_dma)(struct folio *dst, struct folio *src);
+ struct rcu_head srcu_head;
+ struct module *owner;
+};
+
+extern struct migrator migrator;
+extern struct mutex migrator_mut;
+extern struct srcu_struct mig_srcu;
+extern atomic_t dispatch_to_dma;
+
+#ifdef CONFIG_DMA_MIGRATION
+void srcu_mig_cb(struct rcu_head *head);
+void dma_update_migrator(struct migrator *mig);
+char *get_active_migrator_name(void);
+bool can_dma_migrate(struct folio *dst, struct folio *src);
+void start_offloading(struct migrator *migrator);
+void stop_offloading(void);
+#else
+static inline void srcu_mig_cb(struct rcu_head *head) { }
+static inline void dma_update_migrator(struct migrator *mig) { }
+static inline char *get_active_migrator_name(void) { return NULL; }
+static inline bool can_dma_migrate(struct folio *dst, struct folio *src) { return true; }
+static inline void start_offloading(struct migrator *migrator) { }
+static inline void stop_offloading(void) { }
+#endif /* CONFIG_DMA_MIGRATION */
+
+#endif /* _MIGRATE_DMA_H */
@@ -662,6 +662,14 @@ config MIGRATION
config DEVICE_MIGRATION
def_bool MIGRATION && ZONE_DEVICE
+config DMA_MIGRATION
+ bool "Offload the page copy in page migration to DMA engines"
+ depends on MIGRATION
+ help
+   An interface that allows external modules or drivers to offload
+   the page copying step of page migration, for example to a DMA
+   engine. A minimal driver-side usage sketch follows below.
+
+   If unsure, say N.
+
config ARCH_ENABLE_HUGEPAGE_MIGRATION
bool
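For illustration, here is a minimal sketch of how a provider might use this interface, written only against the header introduced above. The module itself, the demo_* identifiers, and the "demo-dma" name are hypothetical, and the CPU copy via folio_copy() merely stands in for building, submitting, and completing real DMA transfers.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example provider; not part of this patch. */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/migrate_dma.h>

static void demo_migrate_dma(struct list_head *dst_list,
                             struct list_head *src_list)
{
        struct folio *src, *dst;

        /*
         * Walk the paired lists in lockstep.  A real driver would submit
         * DMA descriptors for each dst/src pair and wait for completion;
         * a plain CPU copy keeps this sketch self-contained.
         */
        dst = list_first_entry(dst_list, struct folio, lru);
        list_for_each_entry(src, src_list, lru) {
                folio_copy(dst, src);
                dst = list_next_entry(dst, lru);
        }
}

static bool demo_can_migrate(struct folio *dst, struct folio *src)
{
        /* Only offload base pages; let the kernel copy large folios. */
        return !folio_test_large(src);
}

static struct migrator demo_migrator = {
        .name            = "demo-dma",
        .migrate_dma     = demo_migrate_dma,
        .can_migrate_dma = demo_can_migrate,
        .owner           = THIS_MODULE,
};

static int __init demo_init(void)
{
        start_offloading(&demo_migrator);
        return 0;
}

static void __exit demo_exit(void)
{
        stop_offloading();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example DMA page-migration offload provider");

Note that dma_update_migrator() takes a module reference on .owner for as long as a provider is registered, so a provider has to call stop_offloading() before it can go away; this also restores the kernel's default copy path.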
@@ -87,6 +87,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_FAIL_PAGE_ALLOC) += fail_page_alloc.o
obj-$(CONFIG_MEMTEST) += memtest.o
obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_DMA_MIGRATION) += migrate_dma.o
obj-$(CONFIG_NUMA) += memory-tiers.o
obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
@@ -50,6 +50,7 @@
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
+#include <linux/migrate_dma.h>
#include <asm/tlbflush.h>
@@ -656,6 +657,37 @@ void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
}
EXPORT_SYMBOL(folio_migrate_copy);
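+
+/*
+ * Copy hooks used by the batched migration path below.  They default to
+ * the kernel's folios_copy() and can_dma_migrate() and are retargeted by
+ * dma_update_migrator() when an offload provider registers or unregisters.
+ */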
+DEFINE_STATIC_CALL(_folios_copy, folios_copy);
+DEFINE_STATIC_CALL(_can_dma_migrate, can_dma_migrate);
+
+#ifdef CONFIG_DMA_MIGRATION
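+/*
+ * SRCU callback queued by dma_update_migrator(); it runs only after a
+ * grace period has elapsed, i.e. once earlier SRCU readers have finished.
+ */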
+void srcu_mig_cb(struct rcu_head *head)
+{
+ static_call_query(_folios_copy);
+}
+
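+/*
+ * Install @mig as the active migrator: retarget the copy and filter static
+ * calls, record the migrator's name, and move the module reference from
+ * the old owner to the new one.  A NULL @mig restores the kernel's default
+ * folios_copy()/can_dma_migrate() implementations.
+ */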
+void dma_update_migrator(struct migrator *mig)
+{
+ int index;
+
+ mutex_lock(&migrator_mut);
+ index = srcu_read_lock(&mig_srcu);
+ strscpy(migrator.name, mig ? mig->name : "kernel", MIGRATOR_NAME_LEN);
+ static_call_update(_folios_copy, mig ? mig->migrate_dma : folios_copy);
+ static_call_update(_can_dma_migrate, mig ? mig->can_migrate_dma : can_dma_migrate);
+ if (READ_ONCE(migrator.owner))
+ module_put(migrator.owner);
+ xchg(&migrator.owner, mig ? mig->owner : NULL);
+ if (READ_ONCE(migrator.owner))
+ try_module_get(migrator.owner);
+ srcu_read_unlock(&mig_srcu, index);
+ mutex_unlock(&migrator_mut);
+ call_srcu(&mig_srcu, &migrator.srcu_head, srcu_mig_cb);
+ srcu_barrier(&mig_srcu);
+}
+
+#endif /* CONFIG_DMA_MIGRATION */
+
/************************************************************
* Migration functions
***********************************************************/
@@ -1686,6 +1718,7 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
struct anon_vma *anon_vma = NULL;
bool is_lru;
int is_thp = 0;
+ bool can_migrate = true;
struct migrate_folio_info *mig_info, *mig_info2;
LIST_HEAD(temp_src_folios);
LIST_HEAD(temp_dst_folios);
@@ -1720,7 +1753,10 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
* This does everything except the page copy. The actual page copy
* is handled later in a batch manner.
*/
- if (likely(is_lru)) {
+ /* Ask the active migrator whether it can handle this folio pair. */
+ can_migrate = static_call(_can_dma_migrate)(dst, folio);
+ if (unlikely(!can_migrate)) {
+  rc = -EAGAIN;
+ } else if (likely(is_lru)) {
struct address_space *mapping = folio_mapping(folio);
if (!mapping)
@@ -1786,7 +1822,7 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
goto out;
/* Batch copy the folios */
- folios_copy(dst_folios, src_folios);
+ static_call(_folios_copy)(dst_folios, src_folios);
/*
* Iterate the folio lists to remove migration pte and restore them
new file mode 100644
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/migrate.h>
+#include <linux/migrate_dma.h>
+#include <linux/rculist.h>
+#include <linux/static_call.h>
+
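+/* Non-zero while an external migrator is registered to offload copies. */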
+atomic_t dispatch_to_dma = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(dispatch_to_dma);
+
+DEFINE_MUTEX(migrator_mut);
+DEFINE_SRCU(mig_srcu);
+
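+/*
+ * Bookkeeping for the active migrator.  It starts out describing the
+ * kernel's CPU copy path; dma_update_migrator() updates the name and
+ * module owner here and retargets the static calls in mm/migrate.c when a
+ * provider calls start_offloading()/stop_offloading().
+ */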
+struct migrator migrator = {
+ .name = "kernel",
+ .migrate_dma = folios_copy,
+ .can_migrate_dma = can_dma_migrate,
+ .srcu_head.func = srcu_mig_cb,
+ .owner = NULL,
+};
+
+bool can_dma_migrate(struct folio *dst, struct folio *src)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dma_migrate);
+
+void start_offloading(struct migrator *m)
+{
+ int offloading = 0;
+
+ pr_info("starting migration offload by %s\n", m->name);
+ dma_update_migrator(m);
+ atomic_try_cmpxchg(&dispatch_to_dma, &offloading, 1);
+}
+EXPORT_SYMBOL_GPL(start_offloading);
+
+void stop_offloading(void)
+{
+ int offloading = 1;
+
+ pr_info("stopping migration offload by %s\n", migrator.name);
+ dma_update_migrator(NULL);
+ atomic_try_cmpxchg(&dispatch_to_dma, &offloading, 0);
+}
+EXPORT_SYMBOL_GPL(stop_offloading);
+
+char *get_active_migrator_name(void)
+{
+ return migrator.name;
+}
+EXPORT_SYMBOL_GPL(get_active_migrator_name);