@@ -7,7 +7,9 @@
#include <asm-generic/set_memory.h>
#define set_memory_rox set_memory_rox
int set_memory_rox(unsigned long addr, int numpages);
+
+#ifdef CONFIG_KERNEL_REPLICATION
+#define numa_set_memory_rox numa_set_memory_rox
+int numa_set_memory_rox(unsigned long addr, int numpages);
+#else
+#define numa_set_memory_rox set_memory_rox
+#endif /* CONFIG_KERNEL_REPLICATION */
/*
* The set_memory_* API can be used to change various attributes of a virtual
@@ -58,6 +60,18 @@ int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
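+
+/*
+ * NUMA text replication aware helpers: when CONFIG_KERNEL_REPLICATION is
+ * disabled they simply alias the plain set_memory_*() variants.
+ */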
+#ifdef CONFIG_KERNEL_REPLICATION
+int numa_set_memory_np(unsigned long addr, int numpages);
+int numa_set_memory_np_noalias(unsigned long addr, int numpages);
+int numa_set_memory_global(unsigned long addr, int numpages);
+int numa_set_memory_nonglobal(unsigned long addr, int numpages);
+#else
+#define numa_set_memory_np set_memory_np
+#define numa_set_memory_np_noalias set_memory_np_noalias
+#define numa_set_memory_global set_memory_global
+#define numa_set_memory_nonglobal set_memory_nonglobal
+#endif /* CONFIG_KERNEL_REPLICATION */
+
/*
* For legacy compatibility with the old APIs, a few functions
* are provided that work on a "struct page".
@@ -22,6 +22,7 @@
#include <linux/cc_platform.h>
#include <linux/set_memory.h>
#include <linux/memregion.h>
+#include <linux/numa_replication.h>
#include <asm/e820/api.h>
#include <asm/processor.h>
@@ -1790,7 +1791,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int primary)
return ret;
}
-static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+static int change_page_attr_set_clr_pgd(pgd_t *pgd, unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
int force_split, int in_flag,
struct page **pages)
@@ -1845,6 +1846,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
cpa.flags = in_flag;
cpa.curpage = 0;
cpa.force_split = force_split;
+ cpa.pgd = pgd;
ret = __change_page_attr_set_clr(&cpa, 1);
@@ -1873,6 +1875,15 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
return ret;
}
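+
+/*
+ * Preserve the original entry point for existing callers: a NULL pgd keeps
+ * the previous behaviour of operating on the regular kernel page tables.
+ */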
+static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+ pgprot_t mask_set, pgprot_t mask_clr,
+ int force_split, int in_flag,
+ struct page **pages)
+{
+ return change_page_attr_set_clr_pgd(NULL, addr, numpages, mask_set,
+ mask_clr, force_split, in_flag, pages);
+}
+
static inline int change_page_attr_set(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
@@ -1880,6 +1891,13 @@ static inline int change_page_attr_set(unsigned long *addr, int numpages,
(array ? CPA_ARRAY : 0), NULL);
}
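+
+/* As change_page_attr_set(), but operating on the given PGD. */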
+static inline int change_page_attr_set_pgd(pgd_t *pgd, unsigned long *addr, int numpages,
+ pgprot_t mask, int array)
+{
+ return change_page_attr_set_clr_pgd(pgd, addr, numpages, mask, __pgprot(0), 0,
+ (array ? CPA_ARRAY : 0), NULL);
+}
+
static inline int change_page_attr_clear(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
@@ -1887,6 +1905,13 @@ static inline int change_page_attr_clear(unsigned long *addr, int numpages,
(array ? CPA_ARRAY : 0), NULL);
}
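+
+/* As change_page_attr_clear(), but operating on the given PGD. */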
+static inline int change_page_attr_clear_pgd(pgd_t *pgd, unsigned long *addr, int numpages,
+ pgprot_t mask, int array)
+{
+ return change_page_attr_set_clr_pgd(pgd, addr, numpages, __pgprot(0), mask, 0,
+ (array ? CPA_ARRAY : 0), NULL);
+}
+
static inline int cpa_set_pages_array(struct page **pages, int numpages,
pgprot_t mask)
{
@@ -2122,6 +2147,129 @@ int set_memory_global(unsigned long addr, int numpages)
__pgprot(_PAGE_GLOBAL), 0);
}
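+
+/*
+ * numa_set_memory_*(): NUMA-replication aware counterparts of the
+ * set_memory_*() helpers above.  Each variant applies the same attribute
+ * change through every replicated kernel PGD (init_mm.pgd_numa[nid]) so the
+ * per-node copies of the page tables stay consistent.
+ */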
+#ifdef CONFIG_KERNEL_REPLICATION
+int numa_set_memory_x(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+
+ if (!(__supported_pte_mask & _PAGE_NX))
+ return 0;
+
+	for_each_replica(nid)
+ ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(_PAGE_NX), 0);
+
+ return ret;
+}
+
+int numa_set_memory_nx(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+
+ if (!(__supported_pte_mask & _PAGE_NX))
+ return 0;
+
+	for_each_replica(nid)
+ ret |= change_page_attr_set_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(_PAGE_NX), 0);
+
+ return ret;
+}
+
+int numa_set_memory_ro(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+
+ for_each_replica(nid)
+ ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(_PAGE_RW), 0);
+
+ return ret;
+}
+
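+/*
+ * Unlike the helpers above, walk every online node's PGD and stop after the
+ * first pass when the kernel text is not replicated.
+ */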
+int numa_set_memory_rox(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+	pgprot_t clr = __pgprot(_PAGE_RW);
+
+ if (__supported_pte_mask & _PAGE_NX)
+ clr.pgprot |= _PAGE_NX;
+
+ for_each_online_node(nid) {
+ ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages, clr, 0);
+ if (!is_text_replicated())
+ break;
+ }
+
+	return ret;
+}
+
+int numa_set_memory_rw(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+
+ for_each_replica(nid)
+ ret |= change_page_attr_set_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(_PAGE_RW), 0);
+
+ return ret;
+}
+
+int numa_set_memory_np(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+
+ for_each_replica(nid)
+ ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(_PAGE_PRESENT), 0);
+
+ return ret;
+}
+
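+/* Like numa_set_memory_np(), but do not touch aliased mappings (CPA_NO_CHECK_ALIAS). */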
+int numa_set_memory_np_noalias(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+ int cpa_flags = CPA_NO_CHECK_ALIAS;
+
+ for_each_replica(nid)
+ ret |= change_page_attr_set_clr_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(0),
+ __pgprot(_PAGE_PRESENT), 0,
+ cpa_flags, NULL);
+
+ return ret;
+}
+
+int numa_set_memory_global(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+
+ for_each_replica(nid)
+ ret |= change_page_attr_set_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(_PAGE_GLOBAL), 0);
+
+ return ret;
+}
+
+int numa_set_memory_nonglobal(unsigned long addr, int numpages)
+{
+ int ret = 0;
+ int nid;
+
+ for_each_replica(nid)
+ ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+ __pgprot(_PAGE_GLOBAL), 0);
+
+ return ret;
+}
+#endif /* CONFIG_KERNEL_REPLICATION */
+
/*
* __set_memory_enc_pgtable() is used for the hypervisors that get
* informed about "encryption" status via page tables.
@@ -10,4 +10,16 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_KERNEL_REPLICATION
+int numa_set_memory_ro(unsigned long addr, int numpages);
+int numa_set_memory_rw(unsigned long addr, int numpages);
+int numa_set_memory_x(unsigned long addr, int numpages);
+int numa_set_memory_nx(unsigned long addr, int numpages);
+#else
+#define numa_set_memory_ro set_memory_ro
+#define numa_set_memory_rw set_memory_rw
+#define numa_set_memory_x set_memory_x
+#define numa_set_memory_nx set_memory_nx
+#endif /* CONFIG_KERNEL_REPLICATION */
+
#endif
@@ -24,6 +24,16 @@ static inline int set_memory_rox(unsigned long addr, int numpages)
}
#endif
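+
+/*
+ * Generic fallback: make the range read-only, then executable, mirroring the
+ * set_memory_rox() fallback above.
+ */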
+#ifndef numa_set_memory_rox
+static inline int numa_set_memory_rox(unsigned long addr, int numpages)
+{
+ int ret = numa_set_memory_ro(addr, numpages);
+
+	if (ret)
+ return ret;
+ return numa_set_memory_x(addr, numpages);
+}
+#endif
+
#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
static inline int set_direct_map_invalid_noflush(struct page *page)
{