--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -388,15 +388,15 @@ void __init mem_init(void)
* detected at build time already.
*/
#ifdef CONFIG_COMPAT
- BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
+ VM_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

/*
* Selected page table levels should match when derived from
* scratch using the virtual address range and page size.
*/
- BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
- CONFIG_PGTABLE_LEVELS);
+ VM_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+ CONFIG_PGTABLE_LEVELS);

if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -639,8 +639,8 @@ static void __init map_mem(pgd_t *pgdp)
* entire reduced VA space is covered by a single pgd_t which will have
* been populated without the PXNTable attribute by the time we get here.)
*/
- BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) &&
- pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1);
+ VM_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) &&
+ pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1);

early_kfence_pool = arm64_kfence_alloc_pool();
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -56,7 +56,7 @@ void __init pgtable_cache_init(void)
* With 52-bit physical addresses, the architecture requires the
* top-level table to be aligned to at least 64 bytes.
*/
- BUILD_BUG_ON(PGD_SIZE < 64);
+ VM_BUG_ON(PGD_SIZE < 64);
#endif

/*

There are some build bug checks that will no longer compile for
boot-time page size because the values they are testing are no longer
compile-time constants. Resolve these by converting them to VM_BUG_ON,
which will perform a runtime check.

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---

***NOTE*** Any confused maintainers may want to read the cover note
here for context:
https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/

 arch/arm64/mm/init.c | 6 +++---
 arch/arm64/mm/mmu.c  | 4 ++--
 arch/arm64/mm/pgd.c  | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)
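For context, a minimal sketch of the distinction (hypothetical code, not
from this series; example_page_size and example_sanity_checks() are
made-up names): BUILD_BUG_ON() requires an integer constant expression
and is evaluated by the compiler, so it no longer builds once the tested
value is only known at boot, whereas VM_BUG_ON() accepts any expression
and defers the check to runtime:

#include <linux/bug.h>		/* BUILD_BUG_ON() */
#include <linux/cache.h>	/* __ro_after_init */
#include <linux/init.h>		/* __init */
#include <linux/mmdebug.h>	/* VM_BUG_ON() */
#include <linux/sizes.h>	/* SZ_4K */

/* Hypothetical value that is only determined at boot time. */
static unsigned long __ro_after_init example_page_size = SZ_4K;

static void __init example_sanity_checks(void)
{
	/* OK: sizeof() is an integer constant expression. */
	BUILD_BUG_ON(sizeof(unsigned long) < 4);

	/*
	 * Fails to build once the operand is a runtime variable:
	 *
	 *	BUILD_BUG_ON(example_page_size < SZ_4K);
	 */

	/*
	 * Runtime alternative: expands to BUG_ON() when CONFIG_DEBUG_VM
	 * is enabled, and to a compile-checked no-op otherwise.
	 */
	VM_BUG_ON(example_page_size < SZ_4K);
}

The trade-off is that the converted checks now only fire at runtime, and
only on kernels built with CONFIG_DEBUG_VM; on !CONFIG_DEBUG_VM builds
VM_BUG_ON() generates no runtime check at all.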