
[RFC,v1,44/57] arm64: Align sections to PAGE_SIZE_MAX

Message ID 20241014105912.3207374-44-ryan.roberts@arm.com (mailing list archive)
State New, archived
Series: Boot-time page size selection for arm64

Commit Message

Ryan Roberts Oct. 14, 2024, 10:58 a.m. UTC
Increase alignment of sections in nvhe hyp, vdso and final vmlinux image
from PAGE_SIZE to PAGE_SIZE_MAX. For compile-time PAGE_SIZE,
PAGE_SIZE_MAX == PAGE_SIZE so there is no change. For boot-time
PAGE_SIZE, PAGE_SIZE_MAX is the largest selectable page size.

For a boot-time page size build, image size is comparable to a 64K page
size compile-time build. In future, it may be desirable to optimize
run-time memory consumption by freeing unused padding pages when the
boot-time selected page size is less than PAGE_SIZE_MAX.

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---

***NOTE***
Any confused maintainers may want to read the cover note here for context:
https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/

 arch/arm64/include/asm/memory.h     |  4 +--
 arch/arm64/kernel/vdso-wrap.S       |  4 +--
 arch/arm64/kernel/vdso.c            |  7 +++---
 arch/arm64/kernel/vdso/vdso.lds.S   |  4 +--
 arch/arm64/kernel/vdso32-wrap.S     |  4 +--
 arch/arm64/kernel/vdso32/vdso.lds.S |  4 +--
 arch/arm64/kernel/vmlinux.lds.S     | 38 ++++++++++++++---------------
 arch/arm64/kvm/hyp/nvhe/hyp.lds.S   |  2 +-
 8 files changed, 34 insertions(+), 33 deletions(-)
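
As a rough sketch of the relationship the commit message describes (the
real definitions are introduced earlier in the series; the config symbol
name below is only a hypothetical stand-in):

    /* Sketch only: boot-time page size builds pad for the worst case. */
    #ifdef CONFIG_ARM64_BOOT_TIME_PAGE_SIZE	/* hypothetical name */
    #define PAGE_SHIFT_MAX	16		/* 64K: largest selectable page size */
    #else
    #define PAGE_SHIFT_MAX	PAGE_SHIFT	/* compile-time: unchanged */
    #endif
    #define PAGE_SIZE_MAX	(_AC(1, UL) << PAGE_SHIFT_MAX)

Every ALIGN(PAGE_SIZE_MAX) and ". += PAGE_SIZE_MAX" in the patch below then
reserves enough space to stay page-aligned whichever page size is selected
at boot.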

Comments

Thomas Weißschuh Oct. 19, 2024, 2:16 p.m. UTC | #1
On 2024-10-14 11:58:51+0100, Ryan Roberts wrote:
> Increase alignment of sections in nvhe hyp, vdso and final vmlinux image
> from PAGE_SIZE to PAGE_SIZE_MAX. For compile-time PAGE_SIZE,
> PAGE_SIZE_MAX == PAGE_SIZE so there is no change. For boot-time
> PAGE_SIZE, PAGE_SIZE_MAX is the largest selectable page size.
> 
> For a boot-time page size build, image size is comparable to a 64K page
> size compile-time build. In future, it may be desirable to optimize
> run-time memory consumption by freeing unused padding pages when the
> boot-time selected page size is less than PAGE_SIZE_MAX.
> 
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
> 
> ***NOTE***
> Any confused maintainers may want to read the cover note here for context:
> https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/
> 
>  arch/arm64/include/asm/memory.h     |  4 +--
>  arch/arm64/kernel/vdso-wrap.S       |  4 +--
>  arch/arm64/kernel/vdso.c            |  7 +++---
>  arch/arm64/kernel/vdso/vdso.lds.S   |  4 +--
>  arch/arm64/kernel/vdso32-wrap.S     |  4 +--
>  arch/arm64/kernel/vdso32/vdso.lds.S |  4 +--
>  arch/arm64/kernel/vmlinux.lds.S     | 38 ++++++++++++++---------------
>  arch/arm64/kvm/hyp/nvhe/hyp.lds.S   |  2 +-
>  8 files changed, 34 insertions(+), 33 deletions(-)

> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
> index 89b6e78400023..1efe98909a2e0 100644
> --- a/arch/arm64/kernel/vdso.c
> +++ b/arch/arm64/kernel/vdso.c
> @@ -195,7 +195,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
>  
>  	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
>  	/* Be sure to map the data page */
> -	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
> +	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE_MAX;
>  
>  	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
>  	if (IS_ERR_VALUE(vdso_base)) {
> @@ -203,7 +203,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
>  		goto up_fail;
>  	}
>  
> -	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
> +	ret = _install_special_mapping(mm, vdso_base,
> +				       VVAR_NR_PAGES * PAGE_SIZE_MAX,
>  				       VM_READ|VM_MAYREAD|VM_PFNMAP,
>  				       vdso_info[abi].dm);
>  	if (IS_ERR(ret))
> @@ -212,7 +213,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
>  	if (system_supports_bti_kernel())
>  		gp_flags = VM_ARM64_BTI;
>  
> -	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
> +	vdso_base += VVAR_NR_PAGES * PAGE_SIZE_MAX;
>  	mm->context.vdso = (void *)vdso_base;
>  	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
>  				       VM_READ|VM_EXEC|gp_flags|

> diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
> index 45354f2ddf706..f7d1537a689e8 100644
> --- a/arch/arm64/kernel/vdso/vdso.lds.S
> +++ b/arch/arm64/kernel/vdso/vdso.lds.S
> @@ -18,9 +18,9 @@ OUTPUT_ARCH(aarch64)
>  
>  SECTIONS
>  {
> -	PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
> +	PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE_MAX);
>  #ifdef CONFIG_TIME_NS
> -	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
> +	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE_MAX);

This looks like it also needs a change to vvar_fault() in vdso.c.
The symbols are now always PAGE_SIZE_MAX apart, while vvar_fault() works
in page offsets (vmf->pgoff) that are based on the runtime PAGE_SIZE and
compares them against hardcoded offsets.

As a test, you can use tools/testing/selftests/timens/timens.

(I can't test this right now, so it's only a suspicion)

>  #endif
>  	. = VDSO_LBASE + SIZEOF_HEADERS;

> diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
> index 8d95d7d35057d..c46d18a69d1ce 100644
> --- a/arch/arm64/kernel/vdso32/vdso.lds.S
> +++ b/arch/arm64/kernel/vdso32/vdso.lds.S
> @@ -18,9 +18,9 @@ OUTPUT_ARCH(arm)
>  
>  SECTIONS
>  {
> -	PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
> +	PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE_MAX);
>  #ifdef CONFIG_TIME_NS
> -	PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE);
> +	PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE_MAX);
>  #endif
>  	. = VDSO_LBASE + SIZEOF_HEADERS;
Ryan Roberts Oct. 21, 2024, 11:20 a.m. UTC | #2
On 19/10/2024 15:16, Thomas Weißschuh wrote:
> On 2024-10-14 11:58:51+0100, Ryan Roberts wrote:
>> Increase alignment of sections in nvhe hyp, vdso and final vmlinux image
>> from PAGE_SIZE to PAGE_SIZE_MAX. For compile-time PAGE_SIZE,
>> PAGE_SIZE_MAX == PAGE_SIZE so there is no change. For boot-time
>> PAGE_SIZE, PAGE_SIZE_MAX is the largest selectable page size.
>>
>> For a boot-time page size build, image size is comparable to a 64K page
>> size compile-time build. In future, it may be desirable to optimize
>> run-time memory consumption by freeing unused padding pages when the
>> boot-time selected page size is less than PAGE_SIZE_MAX.
>>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> ---
>>
>> ***NOTE***
>> Any confused maintainers may want to read the cover note here for context:
>> https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/
>>
>>  arch/arm64/include/asm/memory.h     |  4 +--
>>  arch/arm64/kernel/vdso-wrap.S       |  4 +--
>>  arch/arm64/kernel/vdso.c            |  7 +++---
>>  arch/arm64/kernel/vdso/vdso.lds.S   |  4 +--
>>  arch/arm64/kernel/vdso32-wrap.S     |  4 +--
>>  arch/arm64/kernel/vdso32/vdso.lds.S |  4 +--
>>  arch/arm64/kernel/vmlinux.lds.S     | 38 ++++++++++++++---------------
>>  arch/arm64/kvm/hyp/nvhe/hyp.lds.S   |  2 +-
>>  8 files changed, 34 insertions(+), 33 deletions(-)
> 
>> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
>> index 89b6e78400023..1efe98909a2e0 100644
>> --- a/arch/arm64/kernel/vdso.c
>> +++ b/arch/arm64/kernel/vdso.c
>> @@ -195,7 +195,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
>>  
>>  	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
>>  	/* Be sure to map the data page */
>> -	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
>> +	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE_MAX;
>>  
>>  	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
>>  	if (IS_ERR_VALUE(vdso_base)) {
>> @@ -203,7 +203,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
>>  		goto up_fail;
>>  	}
>>  
>> -	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
>> +	ret = _install_special_mapping(mm, vdso_base,
>> +				       VVAR_NR_PAGES * PAGE_SIZE_MAX,
>>  				       VM_READ|VM_MAYREAD|VM_PFNMAP,
>>  				       vdso_info[abi].dm);
>>  	if (IS_ERR(ret))
>> @@ -212,7 +213,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
>>  	if (system_supports_bti_kernel())
>>  		gp_flags = VM_ARM64_BTI;
>>  
>> -	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
>> +	vdso_base += VVAR_NR_PAGES * PAGE_SIZE_MAX;
>>  	mm->context.vdso = (void *)vdso_base;
>>  	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
>>  				       VM_READ|VM_EXEC|gp_flags|
> 
>> diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
>> index 45354f2ddf706..f7d1537a689e8 100644
>> --- a/arch/arm64/kernel/vdso/vdso.lds.S
>> +++ b/arch/arm64/kernel/vdso/vdso.lds.S
>> @@ -18,9 +18,9 @@ OUTPUT_ARCH(aarch64)
>>  
>>  SECTIONS
>>  {
>> -	PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
>> +	PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE_MAX);
>>  #ifdef CONFIG_TIME_NS
>> -	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
>> +	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE_MAX);
> 
> This looks like it also needs a change to vvar_fault() in vdso.c.
> The symbols are now always PAGE_SIZE_MAX apart, while vvar_fault() works
> in page offsets (vmf->pgoff) that are based on the runtime PAGE_SIZE and
> compares them against hardcoded offsets.
> 
> As a test, you can use tools/testing/selftests/timens/timens.
> 
> (I can't test this right now, so it's only a suspicion)

Ahh good spot - that test does in fact fail.

This fixes the problem:

---8<---
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 1efe98909a2e0..d2049ba6b19f5 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -151,10 +151,11 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       pgoff_t pgmaxoff = vmf->pgoff >> (PAGE_SHIFT_MAX - PAGE_SHIFT);
        struct page *timens_page = find_timens_vvar_page(vma);
        unsigned long pfn;

-       switch (vmf->pgoff) {
+       switch (pgmaxoff) {
        case VVAR_DATA_PAGE_OFFSET:
                if (timens_page)
                        pfn = page_to_pfn(timens_page);
---8<---

I'll include it in the next version.

Thanks,
Ryan

> 
>>  #endif
>>  	. = VDSO_LBASE + SIZEOF_HEADERS;
> 
>> diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
>> index 8d95d7d35057d..c46d18a69d1ce 100644
>> --- a/arch/arm64/kernel/vdso32/vdso.lds.S
>> +++ b/arch/arm64/kernel/vdso32/vdso.lds.S
>> @@ -18,9 +18,9 @@ OUTPUT_ARCH(arm)
>>  
>>  SECTIONS
>>  {
>> -	PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
>> +	PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE_MAX);
>>  #ifdef CONFIG_TIME_NS
>> -	PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE);
>> +	PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE_MAX);
>>  #endif
>>  	. = VDSO_LBASE + SIZEOF_HEADERS;
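
A worked example of the conversion in the fix above, assuming
PAGE_SHIFT == 12 (4K runtime pages) and PAGE_SHIFT_MAX == 16 (64K):

    pgmaxoff = vmf->pgoff >> (PAGE_SHIFT_MAX - PAGE_SHIFT);	/* >> 4 */

    data page:   vmf->pgoff ==  0  =>  pgmaxoff == 0 == VVAR_DATA_PAGE_OFFSET
    timens page: vmf->pgoff == 16  =>  pgmaxoff == 1 == VVAR_TIMENS_PAGE_OFFSET

With a 64K boot-time page size the shift width is zero and the conversion
is a no-op, so the existing case labels keep working for every selectable
page size.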

Patch

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6aa97fa22dc30..5393a859183f7 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -195,13 +195,13 @@ 
  *  Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
  *  until link time.
  */
-#define RESERVED_SWAPPER_OFFSET	(PAGE_SIZE)
+#define RESERVED_SWAPPER_OFFSET	(PAGE_SIZE_MAX)
 
 /*
  *  Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
  *  until link time.
  */
-#define TRAMP_SWAPPER_OFFSET	(2 * PAGE_SIZE)
+#define TRAMP_SWAPPER_OFFSET	(2 * PAGE_SIZE_MAX)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/kernel/vdso-wrap.S b/arch/arm64/kernel/vdso-wrap.S
index c4b1990bf2be0..79fa77628199b 100644
--- a/arch/arm64/kernel/vdso-wrap.S
+++ b/arch/arm64/kernel/vdso-wrap.S
@@ -13,10 +13,10 @@ 
 
 	.globl vdso_start, vdso_end
 	.section .rodata
-	.balign PAGE_SIZE
+	.balign PAGE_SIZE_MAX
 vdso_start:
 	.incbin "arch/arm64/kernel/vdso/vdso.so"
-	.balign PAGE_SIZE
+	.balign PAGE_SIZE_MAX
 vdso_end:
 
 	.previous
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 89b6e78400023..1efe98909a2e0 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -195,7 +195,7 @@  static int __setup_additional_pages(enum vdso_abi abi,
 
 	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
+	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE_MAX;
 
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
@@ -203,7 +203,8 @@  static int __setup_additional_pages(enum vdso_abi abi,
 		goto up_fail;
 	}
 
-	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
+	ret = _install_special_mapping(mm, vdso_base,
+				       VVAR_NR_PAGES * PAGE_SIZE_MAX,
 				       VM_READ|VM_MAYREAD|VM_PFNMAP,
 				       vdso_info[abi].dm);
 	if (IS_ERR(ret))
@@ -212,7 +213,7 @@  static int __setup_additional_pages(enum vdso_abi abi,
 	if (system_supports_bti_kernel())
 		gp_flags = VM_ARM64_BTI;
 
-	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
+	vdso_base += VVAR_NR_PAGES * PAGE_SIZE_MAX;
 	mm->context.vdso = (void *)vdso_base;
 	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
 				       VM_READ|VM_EXEC|gp_flags|
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 45354f2ddf706..f7d1537a689e8 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -18,9 +18,9 @@  OUTPUT_ARCH(aarch64)
 
 SECTIONS
 {
-	PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+	PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE_MAX);
 #ifdef CONFIG_TIME_NS
-	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
+	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE_MAX);
 #endif
 	. = VDSO_LBASE + SIZEOF_HEADERS;
 
diff --git a/arch/arm64/kernel/vdso32-wrap.S b/arch/arm64/kernel/vdso32-wrap.S
index e72ac7bc4c04f..1c6069d6c457e 100644
--- a/arch/arm64/kernel/vdso32-wrap.S
+++ b/arch/arm64/kernel/vdso32-wrap.S
@@ -10,10 +10,10 @@ 
 
 	.globl vdso32_start, vdso32_end
 	.section .rodata
-	.balign PAGE_SIZE
+	.balign PAGE_SIZE_MAX
 vdso32_start:
 	.incbin "arch/arm64/kernel/vdso32/vdso.so"
-	.balign PAGE_SIZE
+	.balign PAGE_SIZE_MAX
 vdso32_end:
 
 	.previous
diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
index 8d95d7d35057d..c46d18a69d1ce 100644
--- a/arch/arm64/kernel/vdso32/vdso.lds.S
+++ b/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -18,9 +18,9 @@  OUTPUT_ARCH(arm)
 
 SECTIONS
 {
-	PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+	PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE_MAX);
 #ifdef CONFIG_TIME_NS
-	PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE);
+	PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE_MAX);
 #endif
 	. = VDSO_LBASE + SIZEOF_HEADERS;
 
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 7f3f6d709ae73..1ef6dea13b57c 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -15,16 +15,16 @@ 
 
 #define HYPERVISOR_DATA_SECTIONS				\
 	HYP_SECTION_NAME(.rodata) : {				\
-		. = ALIGN(PAGE_SIZE);				\
+		. = ALIGN(PAGE_SIZE_MAX);			\
 		__hyp_rodata_start = .;				\
 		*(HYP_SECTION_NAME(.data..ro_after_init))	\
 		*(HYP_SECTION_NAME(.rodata))			\
-		. = ALIGN(PAGE_SIZE);				\
+		. = ALIGN(PAGE_SIZE_MAX);			\
 		__hyp_rodata_end = .;				\
 	}
 
 #define HYPERVISOR_PERCPU_SECTION				\
-	. = ALIGN(PAGE_SIZE);					\
+	. = ALIGN(PAGE_SIZE_MAX);				\
 	HYP_SECTION_NAME(.data..percpu) : {			\
 		*(HYP_SECTION_NAME(.data..percpu))		\
 	}
@@ -39,7 +39,7 @@ 
 #define BSS_FIRST_SECTIONS					\
 	__hyp_bss_start = .;					\
 	*(HYP_SECTION_NAME(.bss))				\
-	. = ALIGN(PAGE_SIZE);					\
+	. = ALIGN(PAGE_SIZE_MAX);				\
 	__hyp_bss_end = .;
 
 /*
@@ -48,7 +48,7 @@ 
  * between them, which can in some cases cause the linker to misalign them. To
  * work around the issue, force a page alignment for __bss_start.
  */
-#define SBSS_ALIGN			PAGE_SIZE
+#define SBSS_ALIGN			PAGE_SIZE_MAX
 #else /* CONFIG_KVM */
 #define HYPERVISOR_EXTABLE
 #define HYPERVISOR_DATA_SECTIONS
@@ -75,14 +75,14 @@  ENTRY(_text)
 jiffies = jiffies_64;
 
 #define HYPERVISOR_TEXT					\
-	. = ALIGN(PAGE_SIZE);				\
+	. = ALIGN(PAGE_SIZE_MAX);			\
 	__hyp_idmap_text_start = .;			\
 	*(.hyp.idmap.text)				\
 	__hyp_idmap_text_end = .;			\
 	__hyp_text_start = .;				\
 	*(.hyp.text)					\
 	HYPERVISOR_EXTABLE				\
-	. = ALIGN(PAGE_SIZE);				\
+	. = ALIGN(PAGE_SIZE_MAX);			\
 	__hyp_text_end = .;
 
 #define IDMAP_TEXT					\
@@ -113,11 +113,11 @@  jiffies = jiffies_64;
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define TRAMP_TEXT					\
-	. = ALIGN(PAGE_SIZE);				\
+	. = ALIGN(PAGE_SIZE_MAX);			\
 	__entry_tramp_text_start = .;			\
 	*(.entry.tramp.text)				\
-	. = ALIGN(PAGE_SIZE);				\
 	__entry_tramp_text_end = .;			\
+	. = ALIGN(PAGE_SIZE_MAX);			\
 	*(.entry.tramp.rodata)
 #else
 #define TRAMP_TEXT
@@ -187,7 +187,7 @@  SECTIONS
 	_etext = .;			/* End of text section */
 
 	/* everything from this point to __init_begin will be marked RO NX */
-	RO_DATA(PAGE_SIZE)
+	RO_DATA(PAGE_SIZE_MAX)
 
 	HYPERVISOR_DATA_SECTIONS
 
@@ -206,22 +206,22 @@  SECTIONS
 		HIBERNATE_TEXT
 		KEXEC_TEXT
 		IDMAP_TEXT
-		. = ALIGN(PAGE_SIZE);
+		. = ALIGN(PAGE_SIZE_MAX);
 	}
 
 	idmap_pg_dir = .;
-	. += PAGE_SIZE;
+	. += PAGE_SIZE_MAX;
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	tramp_pg_dir = .;
-	. += PAGE_SIZE;
+	. += PAGE_SIZE_MAX;
 #endif
 
 	reserved_pg_dir = .;
-	. += PAGE_SIZE;
+	. += PAGE_SIZE_MAX;
 
 	swapper_pg_dir = .;
-	. += PAGE_SIZE;
+	. += PAGE_SIZE_MAX;
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_begin = .;
@@ -290,7 +290,7 @@  SECTIONS
 
 	_data = .;
 	_sdata = .;
-	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE_MAX, THREAD_ALIGN)
 
 	/*
 	 * Data written with the MMU off but read with the MMU on requires
@@ -317,7 +317,7 @@  SECTIONS
 	/* start of zero-init region */
 	BSS_SECTION(SBSS_ALIGN, 0, 0)
 
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(PAGE_SIZE_MAX);
 	init_pg_dir = .;
 	. += INIT_DIR_SIZE_MAX;
 	init_pg_end = .;
@@ -356,7 +356,7 @@  SECTIONS
  * former is page-aligned, but the latter may not be with 16K or 64K pages, so
  * it should also not cross a page boundary.
  */
-ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE,
+ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= SZ_4K,
 	"HYP init code too big")
 ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 	"ID map text too big or misaligned")
@@ -367,7 +367,7 @@  ASSERT(__hibernate_exit_text_start == swsusp_arch_suspend_exit,
        "Hibernate exit text does not start with swsusp_arch_suspend_exit")
 #endif
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3 * SZ_4K,
 	"Entry trampoline text too big")
 #endif
 #ifdef CONFIG_KVM
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index f4562f417d3fc..74c7c21626270 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -21,7 +21,7 @@  SECTIONS {
 	 * .hyp..data..percpu needs to be page aligned to maintain the same
 	 * alignment for when linking into vmlinux.
 	 */
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(PAGE_SIZE_MAX);
 	BEGIN_HYP_SECTION(.data..percpu)
 		PERCPU_INPUT(L1_CACHE_BYTES)
 	END_HYP_SECTION
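
For reference, the memory.h offsets above match the fixed pg_dir layout
that vmlinux.lds.S reserves, now spaced by PAGE_SIZE_MAX:

    idmap_pg_dir    = base
    tramp_pg_dir    = base + 1 * PAGE_SIZE_MAX	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
    reserved_pg_dir = base + 2 * PAGE_SIZE_MAX
    swapper_pg_dir  = base + 3 * PAGE_SIZE_MAX

    RESERVED_SWAPPER_OFFSET = swapper_pg_dir - reserved_pg_dir = PAGE_SIZE_MAX
    TRAMP_SWAPPER_OFFSET    = swapper_pg_dir - tramp_pg_dir    = 2 * PAGE_SIZE_MAX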