@@ -145,6 +145,11 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
phys_addr_t *out_end);
+void __next_reserved_mem_range(u64 *idx, enum memblock_flags flags,
+ enum memblock_flags exclflags,
+ phys_addr_t *out_start, phys_addr_t *out_end,
+ int *out_nid);
+
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
/**
@@ -202,6 +207,11 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
i != (u64)ULLONG_MAX; \
__next_reserved_mem_region(&i, p_start, p_end))
+#define for_each_reserved_mem_range(i, flags, exclflags, p_start, p_end, p_nid)\
+ for (i = 0UL, __next_reserved_mem_range(&i, flags, exclflags, p_start, p_end, p_nid); \
+ i != (u64)ULLONG_MAX; \
+ __next_reserved_mem_range(&i, flags, exclflags, p_start, p_end, p_nid))
+
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
return m->flags & MEMBLOCK_HOTPLUG;
@@ -987,6 +987,55 @@ void __init_memblock __next_reserved_mem_region(u64 *idx,
*idx = ULLONG_MAX;
}
+/**
+ * __next_reserved_mem_range - next function for for_each_reserved_mem_range()
+ * @idx: pointer to u64 loop variable
+ * @flags: pick blocks based on memory attributes
+ * @exclflags: exclude blocks based on memory attributes
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for inclusive end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Iterate over all reserved memory ranges.
+ */
+void __init_memblock __next_reserved_mem_range(u64 *idx,
+					       enum memblock_flags flags,
+					       enum memblock_flags exclflags,
+					       phys_addr_t *out_start,
+					       phys_addr_t *out_end, int *out_nid)
+{
+	struct memblock_type *type = &memblock.reserved;
+	u64 _idx = *idx;	/* u64: must not truncate the iteration cursor */
+
+	for (; _idx < type->cnt; _idx++) {
+		struct memblock_region *r = &type->regions[_idx];
+		phys_addr_t base = r->base;
+		phys_addr_t size = r->size;
+
+		/* skip preserved pages */
+		if ((exclflags & MEMBLOCK_PRESERVED) && memblock_is_preserved(r))
+			continue;
+
+		/* skip non-preserved pages */
+		if ((flags & MEMBLOCK_PRESERVED) && !memblock_is_preserved(r))
+			continue;
+
+		if (out_start)
+			*out_start = base;
+		if (out_end)
+			*out_end = base + size - 1;
+		if (out_nid)
+			*out_nid = memblock_get_region_node(r);
+
+		_idx++;
+		*idx = _idx;
+		return;
+	}
+
+	/* signal end of iteration */
+	*idx = ULLONG_MAX;
+}
+
static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
int m_nid = memblock_get_region_node(m);
@@ -1011,7 +1060,7 @@ static bool should_skip_region(struct memblock_region *m, int nid, int flags)
}
/**
- * __next_mem_range - next function for for_each_free_mem_range() etc.
+ * __next_mem_range - next function for for_each_free_mem_range() etc.
* @idx: pointer to u64 loop variable
* @nid: node selector, %NUMA_NO_NODE for all nodes
* @flags: pick from blocks based on memory attributes
To support deferred initialization of page structs for preserved pages, add an iterator of the memblock reserved list that can select or exclude ranges based on memblock flags. Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com> --- include/linux/memblock.h | 10 ++++++++++ mm/memblock.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 1 deletion(-)