Message ID | 35FD53F367049845BC99AC72306C23D103D6DB491609@CNBJMBX05.corpusers.net (mailing list archive) |
---|---|
State | Not Applicable, archived |
Headers | show |
On Mon, Sep 15, 2014 at 07:07:20PM +0800, Wang, Yalin wrote: > this patch extend the start and end address of initrd to be page aligned, > so that we can free all memory including the un-page aligned head or tail > page of initrd, if the start or end address of initrd are not page > aligned, the page can't be freed by free_initrd_mem() function. Better, but I think it's more complicated than it needs to be: > Signed-off-by: Yalin Wang <yalin.wang@sonymobile.com> > --- > arch/arm/mm/init.c | 19 +++++++++++++++++-- > arch/arm64/mm/init.c | 37 +++++++++++++++++++++++++++++++++---- > 2 files changed, 50 insertions(+), 6 deletions(-) > > diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c > index 659c75d..8490b70 100644 > --- a/arch/arm/mm/init.c > +++ b/arch/arm/mm/init.c > @@ -277,6 +277,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) > void __init arm_memblock_init(const struct machine_desc *mdesc) > { > /* Register the kernel text, kernel data and initrd with memblock. 
*/ > + phys_addr_t phys_initrd_start_orig __maybe_unused; > + phys_addr_t phys_initrd_size_orig __maybe_unused; > #ifdef CONFIG_XIP_KERNEL > memblock_reserve(__pa(_sdata), _end - _sdata); > #else > @@ -289,6 +291,13 @@ void __init arm_memblock_init(const struct machine_desc *mdesc) > phys_initrd_size = initrd_end - initrd_start; > } > initrd_start = initrd_end = 0; > + phys_initrd_start_orig = phys_initrd_start; > + phys_initrd_size_orig = phys_initrd_size; > + /* make sure the start and end address are page aligned */ > + phys_initrd_size = round_up(phys_initrd_start + phys_initrd_size, PAGE_SIZE); > + phys_initrd_start = round_down(phys_initrd_start, PAGE_SIZE); > + phys_initrd_size -= phys_initrd_start; > + > if (phys_initrd_size && > !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { > pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", > @@ -305,9 +314,10 @@ void __init arm_memblock_init(const struct machine_desc *mdesc) > memblock_reserve(phys_initrd_start, phys_initrd_size); > > /* Now convert initrd to virtual addresses */ > - initrd_start = __phys_to_virt(phys_initrd_start); > - initrd_end = initrd_start + phys_initrd_size; > + initrd_start = __phys_to_virt(phys_initrd_start_orig); > + initrd_end = initrd_start + phys_initrd_size_orig; > } > + I think all the above is entirely unnecessary. The memblock APIs (especially memblock_reserve()) will mark the overlapped pages as reserved - they round down the starting address, and round up the end address (calculated from start + size). 
Hence, this: > @@ -636,6 +646,11 @@ static int keep_initrd; > void free_initrd_mem(unsigned long start, unsigned long end) > { > if (!keep_initrd) { > + if (start == initrd_start) > + start = round_down(start, PAGE_SIZE); > + if (end == initrd_end) > + end = round_up(end, PAGE_SIZE); > + > poison_init_mem((void *)start, PAGE_ALIGN(end) - start); > free_reserved_area((void *)start, (void *)end, -1, "initrd"); > } is the only bit of code you likely need to achieve your goal. Thinking about this, I think that you are quite right to align these. The memory around the initrd is defined to be system memory, and we already free the pages around it, so it *is* wrong not to free the partial initrd pages. Good catch.
Great! Yeah, you are right — just keeping the change in free_initrd_mem() is OK; we don't need to keep the reserved memory aligned. Thanks!
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 659c75d..8490b70 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -277,6 +277,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) void __init arm_memblock_init(const struct machine_desc *mdesc) { /* Register the kernel text, kernel data and initrd with memblock. */ + phys_addr_t phys_initrd_start_orig __maybe_unused; + phys_addr_t phys_initrd_size_orig __maybe_unused; #ifdef CONFIG_XIP_KERNEL memblock_reserve(__pa(_sdata), _end - _sdata); #else @@ -289,6 +291,13 @@ void __init arm_memblock_init(const struct machine_desc *mdesc) phys_initrd_size = initrd_end - initrd_start; } initrd_start = initrd_end = 0; + phys_initrd_start_orig = phys_initrd_start; + phys_initrd_size_orig = phys_initrd_size; + /* make sure the start and end address are page aligned */ + phys_initrd_size = round_up(phys_initrd_start + phys_initrd_size, PAGE_SIZE); + phys_initrd_start = round_down(phys_initrd_start, PAGE_SIZE); + phys_initrd_size -= phys_initrd_start; + if (phys_initrd_size && !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", @@ -305,9 +314,10 @@ void __init arm_memblock_init(const struct machine_desc *mdesc) memblock_reserve(phys_initrd_start, phys_initrd_size); /* Now convert initrd to virtual addresses */ - initrd_start = __phys_to_virt(phys_initrd_start); - initrd_end = initrd_start + phys_initrd_size; + initrd_start = __phys_to_virt(phys_initrd_start_orig); + initrd_end = initrd_start + phys_initrd_size_orig; } + #endif arm_mm_memblock_reserve(); @@ -636,6 +646,11 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { + if (start == initrd_start) + start = round_down(start, PAGE_SIZE); + if (end == initrd_end) + end = round_up(end, PAGE_SIZE); + poison_init_mem((void *)start, PAGE_ALIGN(end) - start); free_reserved_area((void 
*)start, (void *)end, -1, "initrd"); } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5472c24..9dfd9a6 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -138,15 +138,38 @@ static void arm64_memory_present(void) void __init arm64_memblock_init(void) { phys_addr_t dma_phys_limit = 0; - + phys_addr_t phys_initrd_start; + phys_addr_t phys_initrd_size; /* * Register the kernel text, kernel data, initrd, and initial * pagetables with memblock. */ memblock_reserve(__pa(_text), _end - _text); #ifdef CONFIG_BLK_DEV_INITRD - if (initrd_start) - memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); + if (initrd_start) { + phys_initrd_start = __virt_to_phys(initrd_start); + phys_initrd_size = initrd_end - initrd_start; + /* make sure the start and end address are page aligned */ + phys_initrd_size = round_up(phys_initrd_start + phys_initrd_size, PAGE_SIZE); + phys_initrd_start = round_down(phys_initrd_start, PAGE_SIZE); + phys_initrd_size -= phys_initrd_start; + if (phys_initrd_size && + !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { + pr_err("INITRD: %pa+%pa is not a memory region - disabling initrd\n", + &phys_initrd_start, &phys_initrd_size); + phys_initrd_start = phys_initrd_size = 0; + } + if (phys_initrd_size && + memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { + pr_err("INITRD: %pa+%pa overlaps in-use memory region - disabling initrd\n", + &phys_initrd_start, &phys_initrd_size); + phys_initrd_start = phys_initrd_size = 0; + } + if (phys_initrd_size) + memblock_reserve(phys_initrd_start, phys_initrd_size); + else + initrd_start = initrd_end = 0; + } #endif if (!efi_enabled(EFI_MEMMAP)) @@ -334,8 +357,14 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { - if (!keep_initrd) + if (!keep_initrd) { + if (start == initrd_start) + start = round_down(start, PAGE_SIZE); + if (end == initrd_end) + end = round_up(end, PAGE_SIZE); + 
free_reserved_area((void *)start, (void *)end, 0, "initrd"); + } } static int __init keepinitrd_setup(char *__unused)
This patch extends the start and end addresses of the initrd to be page aligned, so that we can free all of its memory, including the non-page-aligned head or tail pages of the initrd. If the start or end address of the initrd is not page aligned, those pages can't be freed by the free_initrd_mem() function. Signed-off-by: Yalin Wang <yalin.wang@sonymobile.com> --- arch/arm/mm/init.c | 19 +++++++++++++++++-- arch/arm64/mm/init.c | 37 +++++++++++++++++++++++++++++++++---- 2 files changed, 50 insertions(+), 6 deletions(-)