| Message ID | 20210421065108.1987-3-rppt@kernel.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | arm64: drop pfn_valid_within() and simplify pfn_valid() |
On 21.04.21 08:51, Mike Rapoport wrote:
> From: Mike Rapoport <rppt@linux.ibm.com>
>
> The struct pages representing a reserved memory region are initialized
> using reserve_bootmem_range() function. This function is called for each
> reserved region just before the memory is freed from memblock to the buddy
> page allocator.
>
> The struct pages for MEMBLOCK_NOMAP regions are kept with the default
> values set by the memory map initialization which makes it necessary to
> have a special treatment for such pages in pfn_valid() and
> pfn_valid_within().
>
> Split out initialization of the reserved pages to a function with a
> meaningful name and treat the MEMBLOCK_NOMAP regions the same way as the
> reserved regions and mark struct pages for the NOMAP regions as
> PageReserved.
>
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>  include/linux/memblock.h |  4 +++-
>  mm/memblock.c            | 28 ++++++++++++++++++++++++++--
>  2 files changed, 29 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/memblock.h b/include/linux/memblock.h
> index 5984fff3f175..634c1a578db8 100644
> --- a/include/linux/memblock.h
> +++ b/include/linux/memblock.h
> @@ -30,7 +30,9 @@ extern unsigned long long max_possible_pfn;
>   * @MEMBLOCK_NONE: no special request
>   * @MEMBLOCK_HOTPLUG: hotpluggable region
>   * @MEMBLOCK_MIRROR: mirrored region
> - * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
> + * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
> + * reserved in the memory map; refer to memblock_mark_nomap() description
> + * for futher details
>   */
>  enum memblock_flags {
>  	MEMBLOCK_NONE		= 0x0,	/* No special request */
> diff --git a/mm/memblock.c b/mm/memblock.c
> index afaefa8fc6ab..3abf2c3fea7f 100644
> --- a/mm/memblock.c
> +++ b/mm/memblock.c
> @@ -906,6 +906,11 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
>   * @base: the base phys addr of the region
>   * @size: the size of the region
>   *
> + * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
> + * direct mapping of the physical memory. These regions will still be
> + * covered by the memory map. The struct page representing NOMAP memory
> + * frames in the memory map will be PageReserved()
> + *
>   * Return: 0 on success, -errno on failure.
>   */
>  int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
> @@ -2002,6 +2007,26 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
>  	return end_pfn - start_pfn;
>  }
>
> +static void __init memmap_init_reserved_pages(void)
> +{
> +	struct memblock_region *region;
> +	phys_addr_t start, end;
> +	u64 i;
> +
> +	/* initialize struct pages for the reserved regions */
> +	for_each_reserved_mem_range(i, &start, &end)
> +		reserve_bootmem_region(start, end);
> +
> +	/* and also treat struct pages for the NOMAP regions as PageReserved */
> +	for_each_mem_region(region) {
> +		if (memblock_is_nomap(region)) {
> +			start = region->base;
> +			end = start + region->size;
> +			reserve_bootmem_region(start, end);
> +		}
> +	}
> +}
> +
>  static unsigned long __init free_low_memory_core_early(void)
>  {
>  	unsigned long count = 0;
> @@ -2010,8 +2035,7 @@ static unsigned long __init free_low_memory_core_early(void)
>
>  	memblock_clear_hotplug(0, -1);
>
> -	for_each_reserved_mem_range(i, &start, &end)
> -		reserve_bootmem_region(start, end);
> +	memmap_init_reserved_pages();
>
>  	/*
>  	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id

Reviewed-by: David Hildenbrand <david@redhat.com>
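For context, the "special treatment" the commit message refers to is the NOMAP filtering that arm64 carried in its pfn_valid() implementation at the time. The sketch below is a simplified paraphrase of that pattern; it is not code from this patch, and the real function also performs SPARSEMEM section validity checks that are omitted here.

```c
/*
 * Simplified paraphrase of the arm64-style pfn_valid() this series wants
 * to simplify (illustration only, not taken from the patch).
 */
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	/* reject pfns whose physical address would not fit in phys_addr_t */
	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

	/*
	 * memblock_is_map_memory() is false for MEMBLOCK_NOMAP ranges, so
	 * their struct pages (left at default values before this patch)
	 * have to be filtered out here instead of simply being treated as
	 * ordinary reserved pages.
	 */
	return memblock_is_map_memory(addr);
}
```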
On 4/21/21 12:21 PM, Mike Rapoport wrote:
> From: Mike Rapoport <rppt@linux.ibm.com>
>
> The struct pages representing a reserved memory region are initialized
> using reserve_bootmem_range() function. This function is called for each
> reserved region just before the memory is freed from memblock to the buddy
> page allocator.
>
> The struct pages for MEMBLOCK_NOMAP regions are kept with the default
> values set by the memory map initialization which makes it necessary to
> have a special treatment for such pages in pfn_valid() and
> pfn_valid_within().
>
> Split out initialization of the reserved pages to a function with a
> meaningful name and treat the MEMBLOCK_NOMAP regions the same way as the
> reserved regions and mark struct pages for the NOMAP regions as
> PageReserved.
>
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>  include/linux/memblock.h |  4 +++-
>  mm/memblock.c            | 28 ++++++++++++++++++++++++++--
>  2 files changed, 29 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/memblock.h b/include/linux/memblock.h
> index 5984fff3f175..634c1a578db8 100644
> --- a/include/linux/memblock.h
> +++ b/include/linux/memblock.h
> @@ -30,7 +30,9 @@ extern unsigned long long max_possible_pfn;
>   * @MEMBLOCK_NONE: no special request
>   * @MEMBLOCK_HOTPLUG: hotpluggable region
>   * @MEMBLOCK_MIRROR: mirrored region
> - * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
> + * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
> + * reserved in the memory map; refer to memblock_mark_nomap() description
> + * for futher details

Small nit - s/futher/further

>   */
>  enum memblock_flags {
>  	MEMBLOCK_NONE		= 0x0,	/* No special request */
> diff --git a/mm/memblock.c b/mm/memblock.c
> index afaefa8fc6ab..3abf2c3fea7f 100644
> --- a/mm/memblock.c
> +++ b/mm/memblock.c
> @@ -906,6 +906,11 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
>   * @base: the base phys addr of the region
>   * @size: the size of the region
>   *
> + * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
> + * direct mapping of the physical memory. These regions will still be
> + * covered by the memory map. The struct page representing NOMAP memory
> + * frames in the memory map will be PageReserved()
> + *
>   * Return: 0 on success, -errno on failure.
>   */
>  int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
> @@ -2002,6 +2007,26 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
>  	return end_pfn - start_pfn;
>  }
>
> +static void __init memmap_init_reserved_pages(void)
> +{
> +	struct memblock_region *region;
> +	phys_addr_t start, end;
> +	u64 i;
> +
> +	/* initialize struct pages for the reserved regions */
> +	for_each_reserved_mem_range(i, &start, &end)
> +		reserve_bootmem_region(start, end);
> +
> +	/* and also treat struct pages for the NOMAP regions as PageReserved */
> +	for_each_mem_region(region) {
> +		if (memblock_is_nomap(region)) {
> +			start = region->base;
> +			end = start + region->size;
> +			reserve_bootmem_region(start, end);
> +		}
> +	}

I guess there is no possible method to unify both these loops.

> +}
> +
>  static unsigned long __init free_low_memory_core_early(void)
>  {
>  	unsigned long count = 0;
> @@ -2010,8 +2035,7 @@ static unsigned long __init free_low_memory_core_early(void)
>
>  	memblock_clear_hotplug(0, -1);
>
> -	for_each_reserved_mem_range(i, &start, &end)
> -		reserve_bootmem_region(start, end);
> +	memmap_init_reserved_pages();
>
>  	/*
>  	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
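On the question of unifying the two loops: the first walk uses for_each_reserved_mem_range() over memblock.reserved, while the second uses for_each_mem_region() over memblock.memory and filters on the NOMAP flag, so no single existing iterator covers both sets of ranges. The most that could be shared is the loop body, as in the hypothetical variant below; this is an illustration only, not something proposed in the thread.

```c
/*
 * Hypothetical rewrite only -- not part of the posted patch.  The reserved
 * ranges live in memblock.reserved and the NOMAP ranges in memblock.memory,
 * so the two walks need different iterators; only the body can be shared.
 */
static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	u64 i;

	/* struct pages for explicitly reserved ranges */
	for_each_reserved_mem_range(i, &start, &end)
		reserve_bootmem_region(start, end);

	/* struct pages for NOMAP ranges, treated the same way */
	for_each_mem_region(region)
		if (memblock_is_nomap(region))
			reserve_bootmem_region(region->base,
					       region->base + region->size);
}
```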
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 5984fff3f175..634c1a578db8 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -30,7 +30,9 @@ extern unsigned long long max_possible_pfn;
  * @MEMBLOCK_NONE: no special request
  * @MEMBLOCK_HOTPLUG: hotpluggable region
  * @MEMBLOCK_MIRROR: mirrored region
- * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
+ * reserved in the memory map; refer to memblock_mark_nomap() description
+ * for futher details
  */
 enum memblock_flags {
 	MEMBLOCK_NONE		= 0x0,	/* No special request */
diff --git a/mm/memblock.c b/mm/memblock.c
index afaefa8fc6ab..3abf2c3fea7f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -906,6 +906,11 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
+ * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
+ * direct mapping of the physical memory. These regions will still be
+ * covered by the memory map. The struct page representing NOMAP memory
+ * frames in the memory map will be PageReserved()
+ *
  * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
@@ -2002,6 +2007,26 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
 	return end_pfn - start_pfn;
 }
 
+static void __init memmap_init_reserved_pages(void)
+{
+	struct memblock_region *region;
+	phys_addr_t start, end;
+	u64 i;
+
+	/* initialize struct pages for the reserved regions */
+	for_each_reserved_mem_range(i, &start, &end)
+		reserve_bootmem_region(start, end);
+
+	/* and also treat struct pages for the NOMAP regions as PageReserved */
+	for_each_mem_region(region) {
+		if (memblock_is_nomap(region)) {
+			start = region->base;
+			end = start + region->size;
+			reserve_bootmem_region(start, end);
+		}
+	}
+}
+
 static unsigned long __init free_low_memory_core_early(void)
 {
 	unsigned long count = 0;
@@ -2010,8 +2035,7 @@ static unsigned long __init free_low_memory_core_early(void)
 
 	memblock_clear_hotplug(0, -1);
 
-	for_each_reserved_mem_range(i, &start, &end)
-		reserve_bootmem_region(start, end);
+	memmap_init_reserved_pages();
 
 	/*
 	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
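As a usage illustration of the behaviour the patch documents for memblock_mark_nomap(): the early-boot sequence below is a hedged sketch with made-up fw_base/fw_size values, not code from the series. It shows a region that stays out of the direct mapping but, once free_low_memory_core_early() has called memmap_init_reserved_pages(), ends up with initialized, PageReserved() struct pages like any other reserved memory.

```c
/*
 * Hypothetical early-boot caller (fw_base/fw_size invented for the example).
 * The region is covered by the memory map but never enters the linear map.
 */
phys_addr_t fw_base = 0x80000000;
phys_addr_t fw_size = SZ_2M;

memblock_add(fw_base, fw_size);		/* present in memblock.memory       */
memblock_mark_nomap(fw_base, fw_size);	/* excluded from the direct mapping */

/* ... after free_low_memory_core_early() -> memmap_init_reserved_pages() ... */
struct page *page = pfn_to_page(PHYS_PFN(fw_base));

/* With this patch the NOMAP page is initialized and marked reserved. */
WARN_ON(!PageReserved(page));
```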