--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -37,6 +37,7 @@ enum memblock_flags {
 	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
 	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
 	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
+	MEMBLOCK_PRESERVED	= 0x8,	/* preserved pages region */
 };
 
 /**
@@ -111,6 +112,7 @@ void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
 int memblock_remove(phys_addr_t base, phys_addr_t size);
+int __memblock_reserve(phys_addr_t base, phys_addr_t size, enum memblock_flags flags);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -215,6 +217,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
 	return m->flags & MEMBLOCK_NOMAP;
 }
 
+static inline bool memblock_is_preserved(struct memblock_region *m)
+{
+	return m->flags & MEMBLOCK_PRESERVED;
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
 			    unsigned long *end_pfn);
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -831,6 +831,12 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 	return memblock_remove_range(&memblock.reserved, base, size);
 }
 
+int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
+				       enum memblock_flags flags)
+{
+	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, flags);
+}
+
 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
@@ -838,7 +844,7 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 		     &base, &end, (void *)_RET_IP_);
 
-	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
+	return __memblock_reserve(base, size, 0);
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
--- a/mm/pkram.c
+++ b/mm/pkram.c
@@ -170,7 +170,7 @@ static int __init pkram_reserve_page(unsigned long pfn)
 	size = PAGE_SIZE;
 	if (memblock_is_region_reserved(base, size) ||
-	    memblock_reserve(base, size) < 0)
+	    __memblock_reserve(base, size, MEMBLOCK_PRESERVED) < 0)
 		err = -EBUSY;
 	if (!err)
@@ -1446,7 +1446,7 @@ static void pkram_remove_identity_map(struct page *page)
 static int __init pkram_reserve_range_cb(struct pkram_pg_state *st, unsigned long base, unsigned long size)
 {
 	if (memblock_is_region_reserved(base, size) ||
-	    memblock_reserve(base, size) < 0) {
+	    __memblock_reserve(base, size, MEMBLOCK_PRESERVED) < 0) {
 		pr_warn("PKRAM: reservations exist in [0x%lx,0x%lx]\n", base, base + size - 1);
 		/*
 		 * Set a lower bound so another walk can undo the earlier,
To support deferred initialization of page structs for preserved pages,
distinguish memblock regions that contain preserved pages by setting a new
flag, MEMBLOCK_PRESERVED, on them when they are added to the memblock
reserved list.

Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
 include/linux/memblock.h | 7 +++++++
 mm/memblock.c            | 8 +++++++-
 mm/pkram.c               | 4 ++--
 3 files changed, 16 insertions(+), 3 deletions(-)
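
For reference, a minimal sketch (not part of this patch) of how a deferred
page-struct initialization pass might consume the new flag. The helper name
pkram_init_preserved_pages() is hypothetical; only MEMBLOCK_PRESERVED,
memblock_is_preserved() and __memblock_reserve() are introduced above, and
the walk assumes the stock for_each_memblock() reserved-list iterator
available in kernels of this vintage.

/*
 * Illustrative sketch only -- not part of the patch.  Walk the memblock
 * reserved list and pick out regions that were reserved with
 * MEMBLOCK_PRESERVED, so their struct pages can be initialized in a later
 * pass without zeroing or reusing the preserved contents.
 */
#include <linux/memblock.h>
#include <linux/printk.h>

static void __init pkram_init_preserved_pages(void)
{
	struct memblock_region *r;

	for_each_memblock(reserved, r) {
		if (!memblock_is_preserved(r))
			continue;

		/*
		 * Region holds preserved pages: its page structs are left
		 * for deferred initialization here.
		 */
		pr_debug("PKRAM: preserved region base=%pa size=%pa\n",
			 &r->base, &r->size);
	}
}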