[v2,1/2] arm64: don't make early_*map() calls post paging_init()

Message ID 1420551673-31416-2-git-send-email-leif.lindholm@linaro.org
State New, archived

Commit Message

Leif Lindholm Jan. 6, 2015, 1:41 p.m. UTC
arm64 early_ioremap/iounmap/memremap/memunmap are not supported beyond
the call to paging_init(), but arm64_enter_virtual_mode() (an early
initcall) makes one call to unmap the UEFI memory map.

Rearrange the code to unmap this region before paging_init(), and then
pull back the remapping of the EFI memory map to the second part of
UEFI initialisation - efi_idmap_init() - renaming that function as
efi_memmap_init(), which better describes what it now does.

Signed-off-by: Leif Lindholm <leif.lindholm@linaro.org>
Fixes: f84d02755f5a ("arm64: add EFI runtime services")
---
 arch/arm64/include/asm/efi.h |  4 ++--
 arch/arm64/kernel/efi.c      | 27 ++++++++++++++-------------
 arch/arm64/kernel/setup.c    |  2 +-
 3 files changed, 17 insertions(+), 16 deletions(-)
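
A simplified sketch of the resulting boot ordering in setup_arch() (this is an
editorial illustration derived from the patch; unrelated calls are elided as
/* ... */):

	void __init setup_arch(char **cmdline_p)
	{
		/* ... */
		efi_init();		/* early_memremap() the UEFI memmap, reserve
					 * regions, then early_memunmap() it again */
		/* ... */
		paging_init();		/* early_*map() must not be used after this */
		request_standard_resources();
		efi_memmap_init();	/* permanent ioremap_cache() of the memmap,
					 * then efi_setup_idmap() */
		unflatten_device_tree();
		/* ... */
	}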

Comments

Mark Salter Jan. 6, 2015, 8:35 p.m. UTC | #1
On Tue, 2015-01-06 at 13:41 +0000, Leif Lindholm wrote:
> arm64 early_ioremap/iounmap/memremap/memunmap are not supported beyond
> the call to paging_init(), but arm64_enter_virtual_mode() (an early
> initcall) makes one call to unmap the UEFI memory map.
> 
> Rearrange the code to unmap this region before paging_init(), and then
> pull back the remapping of the EFI memory map to the second part of
> UEFI initialisation - efi_idmap_init() - renaming that function as
> efi_memmap_init(), which better describes what it now does.

> Signed-off-by: Leif Lindholm <leif.lindholm@linaro.org>
> Fixes: f84d02755f5a ("arm64: add EFI runtime services")
> ---
>  arch/arm64/include/asm/efi.h |  4 ++--
>  arch/arm64/kernel/efi.c      | 27 ++++++++++++++-------------
>  arch/arm64/kernel/setup.c    |  2 +-
>  3 files changed, 17 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
> index a34fd3b..92f4d44 100644
> --- a/arch/arm64/include/asm/efi.h
> +++ b/arch/arm64/include/asm/efi.h
> @@ -6,10 +6,10 @@
>  
>  #ifdef CONFIG_EFI
>  extern void efi_init(void);
> -extern void efi_idmap_init(void);
> +extern void efi_memmap_init(void);
>  #else
>  #define efi_init()
> -#define efi_idmap_init()
> +#define efi_memmap_init()
>  #endif
>  
>  #define efi_call_virt(f, ...)						\
> diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
> index 6fac253..e311066 100644
> --- a/arch/arm64/kernel/efi.c
> +++ b/arch/arm64/kernel/efi.c
> @@ -313,17 +313,26 @@ void __init efi_init(void)
>  	memmap.desc_size = params.desc_size;
>  	memmap.desc_version = params.desc_ver;
>  
> -	if (uefi_init() < 0)
> -		return;
> +	if (uefi_init() >= 0)
> +		reserve_regions();
>  
> -	reserve_regions();
> +	early_memunmap(memmap.map, params.mmap_size);
>  }
>  
> -void __init efi_idmap_init(void)
> +void __init efi_memmap_init(void)
>  {
> +	u64 mapsize;
> +
>  	if (!efi_enabled(EFI_BOOT))
>  		return;
>  
> +	/* replace early memmap mapping with permanent mapping */
> +	mapsize = memmap.map_end - memmap.map;
> +	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
> +						   mapsize);

ioremap_cache() could potentially fail here if the phys_map address
doesn't have a valid pfn (not in the kernel linear ram mapping) because
some of the underlying vm support hasn't been initialized yet.

Since this is a mapping which we need during early boot and beyond, why
not just create a fixed mapping for it similar to what we do for the
early console (see FIX_EARLYCON_MEM_BASE)? 
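
A minimal sketch of the fixed-mapping approach suggested here, assuming a
hypothetical FIX_EFI_MEMMAP slot named by analogy with FIX_EARLYCON_MEM_BASE;
note a single fixmap slot covers only one page, so a real version would need
enough slots (or a size check) for larger memory maps:

	/* arch/arm64/include/asm/fixmap.h: add a permanent slot for the memmap */
	enum fixed_addresses {
		FIX_HOLE,
		FIX_EARLYCON_MEM_BASE,
		FIX_EFI_MEMMAP,		/* hypothetical: UEFI memory map */
		__end_of_permanent_fixed_addresses,
		/* temporary early_ioremap() slots follow */
	};

	/* arch/arm64/kernel/efi.c: map via the fixmap instead of ioremap_cache() */
	void __init efi_memmap_init(void)
	{
		u64 mapsize, offset;

		if (!efi_enabled(EFI_BOOT))
			return;

		mapsize = memmap.map_end - memmap.map;
		offset = (phys_addr_t)memmap.phys_map & ~PAGE_MASK;

		/* generic set_fixmap() maps one page with normal attributes */
		set_fixmap(FIX_EFI_MEMMAP, (phys_addr_t)memmap.phys_map);
		memmap.map = (void *)(fix_to_virt(FIX_EFI_MEMMAP) + offset);
		memmap.map_end = memmap.map + mapsize;
		efi.memmap = &memmap;

		/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
		efi_setup_idmap();
	}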

> +	memmap.map_end = memmap.map + mapsize;
> +	efi.memmap = &memmap;
> +
>  	/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
>  	efi_setup_idmap();
>  }
> @@ -379,23 +388,15 @@ static int __init arm64_enter_virtual_mode(void)
>  		return -1;
>  	}
>  
> -	mapsize = memmap.map_end - memmap.map;
> -	early_memunmap(memmap.map, mapsize);
> -
>  	if (efi_runtime_disabled()) {
>  		pr_info("EFI runtime services will be disabled.\n");
>  		return -1;
>  	}
>  
>  	pr_info("Remapping and enabling EFI services.\n");
> -	/* replace early memmap mapping with permanent mapping */
> -	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
> -						   mapsize);
> -	memmap.map_end = memmap.map + mapsize;
> -
> -	efi.memmap = &memmap;
>  
>  	/* Map the runtime regions */
> +	mapsize = memmap.map_end - memmap.map;
>  	virtmap = kmalloc(mapsize, GFP_KERNEL);
>  	if (!virtmap) {
>  		pr_err("Failed to allocate EFI virtual memmap\n");
> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
> index b809911..ebf7820 100644
> --- a/arch/arm64/kernel/setup.c
> +++ b/arch/arm64/kernel/setup.c
> @@ -401,7 +401,7 @@ void __init setup_arch(char **cmdline_p)
>  	paging_init();
>  	request_standard_resources();
>  
> -	efi_idmap_init();
> +	efi_memmap_init();
>  
>  	unflatten_device_tree();
>
Will Deacon Jan. 7, 2015, 10:58 a.m. UTC | #2
On Tue, Jan 06, 2015 at 08:35:22PM +0000, Mark Salter wrote:
> On Tue, 2015-01-06 at 13:41 +0000, Leif Lindholm wrote:
> > diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
> > index 6fac253..e311066 100644
> > --- a/arch/arm64/kernel/efi.c
> > +++ b/arch/arm64/kernel/efi.c
> > @@ -313,17 +313,26 @@ void __init efi_init(void)
> >  	memmap.desc_size = params.desc_size;
> >  	memmap.desc_version = params.desc_ver;
> >  
> > -	if (uefi_init() < 0)
> > -		return;
> > +	if (uefi_init() >= 0)
> > +		reserve_regions();
> >  
> > -	reserve_regions();
> > +	early_memunmap(memmap.map, params.mmap_size);
> >  }
> >  
> > -void __init efi_idmap_init(void)
> > +void __init efi_memmap_init(void)
> >  {
> > +	u64 mapsize;
> > +
> >  	if (!efi_enabled(EFI_BOOT))
> >  		return;
> >  
> > +	/* replace early memmap mapping with permanent mapping */
> > +	mapsize = memmap.map_end - memmap.map;
> > +	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
> > +						   mapsize);
> 
> ioremap_cache() could potentially fail here if the phys_map address
> doesn't have a valid pfn (not in the kernel linear ram mapping) because
> some of the underlying vm support hasn't been initialized yet.

Can you be more specific about the case you have in mind, please? pfn_valid
uses the memblocks on arm64, and that should all have been sorted out in
paging_init(). What's the issue that you're anticipating?

Will
Ard Biesheuvel Jan. 7, 2015, 1:13 p.m. UTC | #3
On 7 January 2015 at 10:58, Will Deacon <will.deacon@arm.com> wrote:
> On Tue, Jan 06, 2015 at 08:35:22PM +0000, Mark Salter wrote:
>> On Tue, 2015-01-06 at 13:41 +0000, Leif Lindholm wrote:
>> > diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
>> > index 6fac253..e311066 100644
>> > --- a/arch/arm64/kernel/efi.c
>> > +++ b/arch/arm64/kernel/efi.c
>> > @@ -313,17 +313,26 @@ void __init efi_init(void)
>> >     memmap.desc_size = params.desc_size;
>> >     memmap.desc_version = params.desc_ver;
>> >
>> > -   if (uefi_init() < 0)
>> > -           return;
>> > +   if (uefi_init() >= 0)
>> > +           reserve_regions();
>> >
>> > -   reserve_regions();
>> > +   early_memunmap(memmap.map, params.mmap_size);
>> >  }
>> >
>> > -void __init efi_idmap_init(void)
>> > +void __init efi_memmap_init(void)
>> >  {
>> > +   u64 mapsize;
>> > +
>> >     if (!efi_enabled(EFI_BOOT))
>> >             return;
>> >
>> > +   /* replace early memmap mapping with permanent mapping */
>> > +   mapsize = memmap.map_end - memmap.map;
>> > +   memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
>> > +                                              mapsize);
>>
>> ioremap_cache() could potentially fail here if the phys_map address
>> doesn't have a valid pfn (not in the kernel linear ram mapping) because
>> some of the underlying vm support hasn't been initialized yet.
>
> Can you be more specific about the case you have in mind, please? pfn_valid
> uses the memblocks on arm64, and that should all have been sorted out in
> paging_init(). What's the issue that you're anticipating?
>

I think Mark's concern is that it is too early to call
__get_free_page(), which is what happens if ioremap_cache() finds that
the requested address is not covered by the existing linear mapping.
Currently, UEFI reserved RAM regions are covered by the linear
mapping, but that is something we intend to change in the future.
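
For reference, roughly what arm64's ioremap_cache() does at this point
(paraphrased from arch/arm64/mm/ioremap.c of this era); the fallback path is
the one that can end up allocating page table pages:

	void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
	{
		/* normal RAM already has a cacheable linear mapping - reuse it */
		if (pfn_valid(__phys_to_pfn(phys_addr)))
			return (void __iomem *)__phys_to_virt(phys_addr);

		/*
		 * Otherwise build a new vmalloc-space mapping; this can allocate
		 * page table pages (ultimately via __get_free_page()), which is
		 * what makes it unsafe very early in boot.
		 */
		return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
					__builtin_return_address(0));
	}
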
Leif Lindholm Jan. 7, 2015, 1:31 p.m. UTC | #4
On Wed, Jan 07, 2015 at 01:13:06PM +0000, Ard Biesheuvel wrote:
> >> > -void __init efi_idmap_init(void)
> >> > +void __init efi_memmap_init(void)
> >> >  {
> >> > +   u64 mapsize;
> >> > +
> >> >     if (!efi_enabled(EFI_BOOT))
> >> >             return;
> >> >
> >> > +   /* replace early memmap mapping with permanent mapping */
> >> > +   mapsize = memmap.map_end - memmap.map;
> >> > +   memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
> >> > +                                              mapsize);
> >>
> >> ioremap_cache() could potentially fail here if the phys_map address
> >> doesn't have a valid pfn (not in the kernel linear ram mapping) because
> >> some of the underlying vm support hasn't been initialized yet.
> >
> > Can you be more specific about the case you have in mind, please? pfn_valid
> > uses the memblocks on arm64, and that should all have been sorted out in
> > paging_init(). What's the issue that you're anticipating?
> 
> I think Mark's concern is that it is too early to call
> __get_free_page(), which is what happens if ioremap_cache() finds that
> the requested address is not covered by the existing linear mapping.
> Currently, UEFI reserved RAM regions are covered by the linear
> mapping, but that is something we intend to change in the future.

Which shouldn't be a problem, right? Since this function will be going
away with your "stable mappings" set, and the remap call bumped down
to an early initcall in arm64_enter_virtual_mode() (or potential
future name for that function).

/
    Leif
Will Deacon Jan. 7, 2015, 7:03 p.m. UTC | #5
On Wed, Jan 07, 2015 at 01:31:09PM +0000, Leif Lindholm wrote:
> On Wed, Jan 07, 2015 at 01:13:06PM +0000, Ard Biesheuvel wrote:
> > >> > -void __init efi_idmap_init(void)
> > >> > +void __init efi_memmap_init(void)
> > >> >  {
> > >> > +   u64 mapsize;
> > >> > +
> > >> >     if (!efi_enabled(EFI_BOOT))
> > >> >             return;
> > >> >
> > >> > +   /* replace early memmap mapping with permanent mapping */
> > >> > +   mapsize = memmap.map_end - memmap.map;
> > >> > +   memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
> > >> > +                                              mapsize);
> > >>
> > >> ioremap_cache() could potentially fail here if the phys_map address
> > >> doesn't have a valid pfn (not in the kernel linear ram mapping) because
> > >> some of the underlying vm support hasn't been initialized yet.
> > >
> > > Can you be more specific about the case you have in mind, please? pfn_valid
> > > uses the memblocks on arm64, and that should all have been sorted out in
> > > paging_init(). What's the issue that you're anticipating?
> > 
> > I think Mark's concern is that it is too early to call
> > __get_free_page(), which is what happens if ioremap_cache() finds that
> > the requested address is not covered by the existing linear mapping.
> > Currently, UEFI reserved RAM regions are covered by the linear
> > mapping, but that is something we intend to change in the future.
> 
> Which shouldn't be a problem, right? Since this function will be going
> away with your "stable mappings" set, and the remap call bumped down
> to an early initcall in arm64_enter_virtual_mode() (or potential
> future name for that function).

Sounds reasonable to me... Ard?

However, I'd certainly like something in the commit log and/or a comment
in the code with our reasoning.

Will
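
One possible shape for such a comment, with purely illustrative wording (not
taken from any posted patch), placed above the ioremap_cache() call in
efi_memmap_init():

	/*
	 * The UEFI memory map is in RAM covered by the kernel linear mapping,
	 * so ioremap_cache() resolves it via pfn_valid()/__phys_to_virt() and
	 * does not need to allocate page tables; calling it straight after
	 * paging_init() is therefore fine. If reserved UEFI regions are ever
	 * removed from the linear mapping, this remap will move to an early
	 * initcall instead (see arm64_enter_virtual_mode()).
	 */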

Patch

diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a34fd3b..92f4d44 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,10 +6,10 @@ 
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efi_idmap_init(void);
+extern void efi_memmap_init(void);
 #else
 #define efi_init()
-#define efi_idmap_init()
+#define efi_memmap_init()
 #endif
 
 #define efi_call_virt(f, ...)						\
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 6fac253..e311066 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -313,17 +313,26 @@  void __init efi_init(void)
 	memmap.desc_size = params.desc_size;
 	memmap.desc_version = params.desc_ver;
 
-	if (uefi_init() < 0)
-		return;
+	if (uefi_init() >= 0)
+		reserve_regions();
 
-	reserve_regions();
+	early_memunmap(memmap.map, params.mmap_size);
 }
 
-void __init efi_idmap_init(void)
+void __init efi_memmap_init(void)
 {
+	u64 mapsize;
+
 	if (!efi_enabled(EFI_BOOT))
 		return;
 
+	/* replace early memmap mapping with permanent mapping */
+	mapsize = memmap.map_end - memmap.map;
+	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
+						   mapsize);
+	memmap.map_end = memmap.map + mapsize;
+	efi.memmap = &memmap;
+
 	/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
 	efi_setup_idmap();
 }
@@ -379,23 +388,15 @@  static int __init arm64_enter_virtual_mode(void)
 		return -1;
 	}
 
-	mapsize = memmap.map_end - memmap.map;
-	early_memunmap(memmap.map, mapsize);
-
 	if (efi_runtime_disabled()) {
 		pr_info("EFI runtime services will be disabled.\n");
 		return -1;
 	}
 
 	pr_info("Remapping and enabling EFI services.\n");
-	/* replace early memmap mapping with permanent mapping */
-	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
-						   mapsize);
-	memmap.map_end = memmap.map + mapsize;
-
-	efi.memmap = &memmap;
 
 	/* Map the runtime regions */
+	mapsize = memmap.map_end - memmap.map;
 	virtmap = kmalloc(mapsize, GFP_KERNEL);
 	if (!virtmap) {
 		pr_err("Failed to allocate EFI virtual memmap\n");
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b809911..ebf7820 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -401,7 +401,7 @@  void __init setup_arch(char **cmdline_p)
 	paging_init();
 	request_standard_resources();
 
-	efi_idmap_init();
+	efi_memmap_init();
 
 	unflatten_device_tree();