[v3] arm64: use generic free_initrd_mem()

Message ID 1569388180-28274-1-git-send-email-rppt@kernel.org (mailing list archive)
State New, archived
Series: [v3] arm64: use generic free_initrd_mem()

Commit Message

Mike Rapoport Sept. 25, 2019, 5:09 a.m. UTC
From: Mike Rapoport <rppt@linux.ibm.com>

arm64 calls memblock_free() for the initrd area in its implementation of
free_initrd_mem(), but this call has no actual effect that late in the boot
process. By the time initrd is freed, all the reserved memory is managed by
the page allocator and the memblock.reserved is unused, so the only purpose
of the memblock_free() call is to keep track of initrd memory for debugging
and accounting.

Without the memblock_free() call the only difference between arm64 and the
generic versions of free_initrd_mem() is the memory poisoning.

Move memblock_free() call to the generic code, enable it there
for the architectures that define ARCH_KEEP_MEMBLOCK and use the generic
implementaion of free_initrd_mem() on arm64.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---

v3:
* fix powerpc build

v2: 
* add memblock_free() to the generic free_initrd_mem()
* rebase on the current upstream


 arch/arm64/mm/init.c | 12 ------------
 init/initramfs.c     |  5 +++++
 2 files changed, 5 insertions(+), 12 deletions(-)
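
For reference, with both hunks applied the generic helper in init/initramfs.c ends up as below. This is reconstructed straight from the diff in this patch, not a separate implementation; the comment added here only restates the changelog:

void __weak free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			"initrd");

#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
	/*
	 * memblock is only kept past boot on architectures that select
	 * ARCH_KEEP_MEMBLOCK; everywhere else this call is compiled out.
	 * At this point it only updates the memblock.reserved accounting,
	 * since the pages themselves are handed back to the page allocator
	 * by free_reserved_area() above.
	 */
	memblock_free(__pa(start), end - start);
#endif
}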

Comments

Anshuman Khandual Sept. 27, 2019, 6:20 a.m. UTC | #1
On 09/25/2019 10:39 AM, Mike Rapoport wrote:
> From: Mike Rapoport <rppt@linux.ibm.com>
> 
> arm64 calls memblock_free() for the initrd area in its implementation of
> free_initrd_mem(), but this call has no actual effect that late in the boot
> process. By the time initrd is freed, all the reserved memory is managed by
> the page allocator and the memblock.reserved is unused, so the only purpose
> of the memblock_free() call is to keep track of initrd memory for debugging
> and accounting.
> 
> Without the memblock_free() call the only difference between arm64 and the
> generic versions of free_initrd_mem() is the memory poisoning.
> 
> Move memblock_free() call to the generic code, enable it there
> for the architectures that define ARCH_KEEP_MEMBLOCK and use the generic
> implementaion of free_initrd_mem() on arm64.

Small nit. s/implementaion/implementation.

> 
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
> 
> v3:
> * fix powerpc build
> 
> v2: 
> * add memblock_free() to the generic free_initrd_mem()
> * rebase on the current upstream
> 
> 
>  arch/arm64/mm/init.c | 12 ------------
>  init/initramfs.c     |  5 +++++
>  2 files changed, 5 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 45c00a5..87a0e3b 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -580,18 +580,6 @@ void free_initmem(void)
>  	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
>  }
>  
> -#ifdef CONFIG_BLK_DEV_INITRD
> -void __init free_initrd_mem(unsigned long start, unsigned long end)
> -{
> -	unsigned long aligned_start, aligned_end;
> -
> -	aligned_start = __virt_to_phys(start) & PAGE_MASK;
> -	aligned_end = PAGE_ALIGN(__virt_to_phys(end));
> -	memblock_free(aligned_start, aligned_end - aligned_start);
> -	free_reserved_area((void *)start, (void *)end, 0, "initrd");
> -}
> -#endif
> -
>  /*
>   * Dump out memory limit information on panic.
>   */
> diff --git a/init/initramfs.c b/init/initramfs.c
> index c47dad0..3d61e13 100644
> --- a/init/initramfs.c
> +++ b/init/initramfs.c
> @@ -10,6 +10,7 @@
>  #include <linux/syscalls.h>
>  #include <linux/utime.h>
>  #include <linux/file.h>
> +#include <linux/memblock.h>
>  
>  static ssize_t __init xwrite(int fd, const char *p, size_t count)
>  {
> @@ -531,6 +532,10 @@ void __weak free_initrd_mem(unsigned long start, unsigned long end)
>  {
>  	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
>  			"initrd");
> +
> +#ifdef CONFIG_ARCH_KEEP_MEMBLOCK

Shouldn't the addresses here be aligned before calling memblock_free()?
Without alignment, this breaks the present behavior on arm64, which was explicitly added
with 13776f9d40a0 ("arm64: mm: free the initrd reserved memblock in a aligned manner").
Or does initrd always get allocated with page alignment on other architectures?

> +	memblock_free(__pa(start), end - start);
> +#endif
>  }
>  
>  #ifdef CONFIG_KEXEC_CORE
>
Mike Rapoport Sept. 28, 2019, 7:19 a.m. UTC | #2
On Fri, Sep 27, 2019 at 11:50:42AM +0530, Anshuman Khandual wrote:
> 
> On 09/25/2019 10:39 AM, Mike Rapoport wrote:
> > From: Mike Rapoport <rppt@linux.ibm.com>
> > 
> > arm64 calls memblock_free() for the initrd area in its implementation of
> > free_initrd_mem(), but this call has no actual effect that late in the boot
> > process. By the time initrd is freed, all the reserved memory is managed by
> > the page allocator and the memblock.reserved is unused, so the only purpose
> > of the memblock_free() call is to keep track of initrd memory for debugging
> > and accounting.
> > 
> > Without the memblock_free() call the only difference between arm64 and the
> > generic versions of free_initrd_mem() is the memory poisoning.
> > 
> > Move memblock_free() call to the generic code, enable it there
> > for the architectures that define ARCH_KEEP_MEMBLOCK and use the generic
> > implementaion of free_initrd_mem() on arm64.
> 
> Small nit. s/implementaion/implementation.
> 
> > 
> > Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> > ---
> > 
> > v3:
> > * fix powerpc build
> > 
> > v2: 
> > * add memblock_free() to the generic free_initrd_mem()
> > * rebase on the current upstream
> > 
> > 
> >  arch/arm64/mm/init.c | 12 ------------
> >  init/initramfs.c     |  5 +++++
> >  2 files changed, 5 insertions(+), 12 deletions(-)
> > 
> > diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> > index 45c00a5..87a0e3b 100644
> > --- a/arch/arm64/mm/init.c
> > +++ b/arch/arm64/mm/init.c
> > @@ -580,18 +580,6 @@ void free_initmem(void)
> >  	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
> >  }
> >  
> > -#ifdef CONFIG_BLK_DEV_INITRD
> > -void __init free_initrd_mem(unsigned long start, unsigned long end)
> > -{
> > -	unsigned long aligned_start, aligned_end;
> > -
> > -	aligned_start = __virt_to_phys(start) & PAGE_MASK;
> > -	aligned_end = PAGE_ALIGN(__virt_to_phys(end));
> > -	memblock_free(aligned_start, aligned_end - aligned_start);
> > -	free_reserved_area((void *)start, (void *)end, 0, "initrd");
> > -}
> > -#endif
> > -
> >  /*
> >   * Dump out memory limit information on panic.
> >   */
> > diff --git a/init/initramfs.c b/init/initramfs.c
> > index c47dad0..3d61e13 100644
> > --- a/init/initramfs.c
> > +++ b/init/initramfs.c
> > @@ -10,6 +10,7 @@
> >  #include <linux/syscalls.h>
> >  #include <linux/utime.h>
> >  #include <linux/file.h>
> > +#include <linux/memblock.h>
> >  
> >  static ssize_t __init xwrite(int fd, const char *p, size_t count)
> >  {
> > @@ -531,6 +532,10 @@ void __weak free_initrd_mem(unsigned long start, unsigned long end)
> >  {
> >  	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
> >  			"initrd");
> > +
> > +#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
> 
> Shouldn't the addresses here be aligned before calling memblock_free()?
> Without alignment, this breaks the present behavior on arm64, which was explicitly added
> with 13776f9d40a0 ("arm64: mm: free the initrd reserved memblock in a aligned manner").

Well, the present behaviour as of v5.3[.1] is to call memblock_free() for the
unaligned initrd area. Commit 13776f9d40a0 ("arm64: mm: free the initrd
reserved memblock in a aligned manner") would indeed fix the reporting in
/sys/kernel/debug/memblock/reserved, but it won't change anything beyond that,
although its commit log implies otherwise.

> Or does initrd always get allocated with page alignment on other architectures?

powerpc reserves an aligned area and s390 does not. The other architectures do not
keep memblock after init.

I'll re-send with the aligned addresses.


> > +	memblock_free(__pa(start), end - start);
> > +#endif
> >  }
> >  
> >  #ifdef CONFIG_KEXEC_CORE
> >
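
Based on the arm64 helper being removed above, a re-send with aligned addresses would presumably round the range out to page boundaries before the memblock_free() call, along the lines of the sketch below. This is only an illustration modeled on the removed arm64 code (the alignment arithmetic mirrors its __virt_to_phys()/PAGE_ALIGN usage); the actual follow-up patch may differ:

void __weak free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
	/*
	 * Round the range out to page boundaries, as the removed arm64
	 * helper did, so that partially used pages at either end are
	 * still dropped from the memblock.reserved accounting.
	 */
	unsigned long aligned_start = __pa(start) & PAGE_MASK;
	unsigned long aligned_end = PAGE_ALIGN(__pa(end));

	memblock_free(aligned_start, aligned_end - aligned_start);
#endif

	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			"initrd");
}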

Patch

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 45c00a5..87a0e3b 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -580,18 +580,6 @@ void free_initmem(void)
 	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-	unsigned long aligned_start, aligned_end;
-
-	aligned_start = __virt_to_phys(start) & PAGE_MASK;
-	aligned_end = PAGE_ALIGN(__virt_to_phys(end));
-	memblock_free(aligned_start, aligned_end - aligned_start);
-	free_reserved_area((void *)start, (void *)end, 0, "initrd");
-}
-#endif
-
 /*
  * Dump out memory limit information on panic.
  */
diff --git a/init/initramfs.c b/init/initramfs.c
index c47dad0..3d61e13 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -10,6 +10,7 @@ 
 #include <linux/syscalls.h>
 #include <linux/utime.h>
 #include <linux/file.h>
+#include <linux/memblock.h>
 
 static ssize_t __init xwrite(int fd, const char *p, size_t count)
 {
@@ -531,6 +532,10 @@ void __weak free_initrd_mem(unsigned long start, unsigned long end)
 {
 	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
 			"initrd");
+
+#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
+	memblock_free(__pa(start), end - start);
+#endif
 }
 
 #ifdef CONFIG_KEXEC_CORE