@@ -78,4 +78,10 @@ static inline int dpt_map_module(struct dpt *dpt, char *module_name)
(dpt_map(dpt, THIS_MODULE->core_layout.base, \
THIS_MODULE->core_layout.size))
+extern int dpt_map_percpu(struct dpt *dpt, void *percpu_ptr, size_t size);
+extern void dpt_unmap_percpu(struct dpt *dpt, void *percpu_ptr);
+
+#define DPT_MAP_CPUVAR(dpt, cpuvar) \
+ dpt_map_percpu(dpt, &cpuvar, sizeof(cpuvar))
+
#endif
@@ -771,6 +771,44 @@ void dpt_unmap(struct dpt *dpt, void *ptr)
}
EXPORT_SYMBOL(dpt_unmap);
+void dpt_unmap_percpu(struct dpt *dpt, void *percpu_ptr)
+{
+ void *ptr;
+ int cpu;
+
+ pr_debug("DPT %p: UNMAP PERCPU %px\n", dpt, percpu_ptr);
+ for_each_possible_cpu(cpu) {
+ ptr = per_cpu_ptr(percpu_ptr, cpu);
+ pr_debug("DPT %p: UNMAP PERCPU%d %px\n", dpt, cpu, ptr);
+ dpt_unmap(dpt, ptr);
+ }
+}
+EXPORT_SYMBOL(dpt_unmap_percpu);
+
+int dpt_map_percpu(struct dpt *dpt, void *percpu_ptr, size_t size)
+{
+ int cpu, err;
+ void *ptr;
+
+ pr_debug("DPT %p: MAP PERCPU %px\n", dpt, percpu_ptr);
+ for_each_possible_cpu(cpu) {
+ ptr = per_cpu_ptr(percpu_ptr, cpu);
+ pr_debug("DPT %p: MAP PERCPU%d %px\n", dpt, cpu, ptr);
+ err = dpt_map(dpt, ptr, size);
+ if (err) {
+ /*
+ * Need to unmap any percpu mapping which has
+ * succeeded before the failure.
+ */
+ dpt_unmap_percpu(dpt, percpu_ptr);
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dpt_map_percpu);
+
/*
* dpt_create - allocate a page-table and create a corresponding
* decorated page-table. The page-table is allocated and aligned
Provide functions to copy page-table entries from the kernel page-table to a decorated page-table for a percpu buffer. A percpu buffer has a different VA range for each cpu, and all of them have to be copied. Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com> --- arch/x86/include/asm/dpt.h | 6 ++++++ arch/x86/mm/dpt.c | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+)