diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -53,6 +53,7 @@ extern unsigned int pageblock_order;
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
diff --git a/mm/memblock.c b/mm/memblock.c
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2014,12 +2014,12 @@ static void __init free_unused_memmap(void)
* presume that there are no holes in the memory map inside
* a pageblock
*/
- prev_end = ALIGN(end, pageblock_nr_pages);
+ prev_end = pageblock_align(end);
}
#ifdef CONFIG_SPARSEMEM
if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
- prev_end = ALIGN(end, pageblock_nr_pages);
+ prev_end = pageblock_align(end);
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
}
#endif
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -532,7 +532,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
struct page *page;
/* isolation is done at page block granularity */
unsigned long isolate_start = pageblock_start_pfn(start_pfn);
- unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
+ unsigned long isolate_end = pageblock_align(end_pfn);
int ret;
bool skip_isolation = false;
@@ -577,7 +577,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn;
struct page *page;
unsigned long isolate_start = pageblock_start_pfn(start_pfn);
- unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
+ unsigned long isolate_end = pageblock_align(end_pfn);
for (pfn = isolate_start;
pfn < isolate_end;
Add pageblock_align() macro and use it to simplify code.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/pageblock-flags.h | 1 +
 mm/memblock.c                   | 4 ++--
 mm/page_isolation.c             | 4 ++--
 3 files changed, 5 insertions(+), 4 deletions(-)
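
For reference, the sketch below illustrates what the new helper computes relative to
the existing pageblock helpers. It is a minimal standalone userspace program, not
kernel code: the pageblock_order value (9, i.e. 512 pages, or 2 MiB pageblocks with
4 KiB pages) and the simplified ALIGN()/ALIGN_DOWN() definitions are assumptions for
illustration only.

/*
 * Standalone userspace sketch, for illustration only: mirrors the
 * pageblock alignment helpers with an assumed pageblock_order of 9
 * and simplified power-of-two ALIGN()/ALIGN_DOWN() macros.
 */
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

#define pageblock_order		9UL
#define pageblock_nr_pages	(1UL << pageblock_order)

#define pageblock_align(pfn)		ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

int main(void)
{
	unsigned long pfn = 1000UL;

	/* pfn 1000 sits inside the pageblock [512, 1024) */
	printf("pageblock_align(%lu)     = %lu\n", pfn, pageblock_align(pfn));     /* 1024 */
	printf("pageblock_start_pfn(%lu) = %lu\n", pfn, pageblock_start_pfn(pfn)); /* 512  */
	printf("pageblock_end_pfn(%lu)   = %lu\n", pfn, pageblock_end_pfn(pfn));   /* 1024 */

	/* An already-aligned pfn is unchanged by pageblock_align() ... */
	printf("pageblock_align(512)      = %lu\n", pageblock_align(512UL));       /* 512  */
	/* ... while pageblock_end_pfn() still advances past it */
	printf("pageblock_end_pfn(512)    = %lu\n", pageblock_end_pfn(512UL));     /* 1024 */

	return 0;
}

The new macro is a drop-in name for the open-coded ALIGN(pfn, pageblock_nr_pages)
calls being replaced in mm/memblock.c and mm/page_isolation.c, so the conversion is
a pure cleanup with no functional change.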