@@ -1416,38 +1416,9 @@ config ARM64_VA_BITS
default 48 if ARM64_VA_BITS_48
default 52 if ARM64_VA_BITS_52
-choice
- prompt "Physical address space size"
- default ARM64_PA_BITS_48
- help
- Choose the maximum physical address range that the kernel will
- support.
-
-config ARM64_PA_BITS_48
- bool "48-bit"
- depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52
-
-config ARM64_PA_BITS_52
- bool "52-bit"
- depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
- help
- Enable support for a 52-bit physical address space, introduced as
- part of the ARMv8.2-LPA extension.
-
- With this enabled, the kernel will also continue to work on CPUs that
- do not support ARMv8.2-LPA, but with some added memory overhead (and
- minor performance overhead).
-
-endchoice
-
-config ARM64_PA_BITS
- int
- default 48 if ARM64_PA_BITS_48
- default 52 if ARM64_PA_BITS_52
-
config ARM64_LPA2
def_bool y
- depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+ depends on !ARM64_64K_PAGES
choice
prompt "Endianness"
@@ -342,14 +342,13 @@ alternative_cb_end
mrs \tmp0, ID_AA64MMFR0_EL1
// Narrow PARange to fit the PS field in TCR_ELx
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
- mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
#ifdef CONFIG_ARM64_LPA2
alternative_if_not ARM64_HAS_VA52
mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
-alternative_else_nop_endif
-#endif
cmp \tmp0, \tmp1
csel \tmp0, \tmp1, \tmp0, hi
+alternative_else_nop_endif
+#endif
bfi \tcr, \tmp0, \pos, #3
.endm
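Moving the cmp/csel inside the alternative keeps \tmp1's only consumer patched out together with its producer, so CPUs with ARM64_HAS_VA52 now program the full reported PARange unclamped. A standalone C rendering of the clamp (mmfr0 would really come from ID_AA64MMFR0_EL1; the PARange encoding is written out):

#include <stdint.h>

#define PARANGE_48	0x5	/* ID_AA64MMFR0_EL1.PARange encoding for 48 bits */

uint64_t tcr_ps_field(uint64_t mmfr0, int lpa2_kernel, int has_va52)
{
	uint64_t parange = mmfr0 & 0x7;	/* ubfx: narrow PARange to 3 bits */

	/* cmp/csel with 'hi': clamp only when more than 48 bits are reported */
	if (lpa2_kernel && !has_va52 && parange > PARANGE_48)
		parange = PARANGE_48;
	return parange;
}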
@@ -599,21 +598,13 @@ alternative_endif
* ttbr: returns the TTBR value
*/
.macro phys_to_ttbr, ttbr, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
orr \ttbr, \phys, \phys, lsr #46
and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
-#else
- mov \ttbr, \phys
-#endif
.endm
.macro phys_to_pte, pte, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
orr \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
and \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
-#else
- mov \pte, \phys
-#endif
.endm
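Both macros are now built unconditionally: when the address has no bits above 47 set, the folded bits are zero and the orr/and reduce to the old mov behaviour. A runnable model of the TTBR case with one worked 52-bit value (the mask is GENMASK_ULL(47, 2), written out):

#include <assert.h>
#include <stdint.h>

#define TTBR_BADDR_MASK_52	(((1ULL << 46) - 1) << 2)	/* bits [47:2] */

/* Fold PA[51:48] of the table address into baddr[5:2]. */
uint64_t phys_to_ttbr(uint64_t phys)
{
	return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
}

int main(void)
{
	/* PA 0xA_0000_0012_3440: 52-bit, 64-byte aligned */
	uint64_t phys = (0xAULL << 48) | 0x123440;

	/* PA[51:48] = 0xA lands at baddr[5:2], i.e. 0xA << 2 = 0x28 */
	assert(phys_to_ttbr(phys) == 0x123468);
	return 0;
}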
/*
@@ -885,9 +885,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
* However, by the "D10.1.4 Principles of the ID scheme
* for fields in ID registers", ARM DDI 0487C.a, any new
* value is guaranteed to be higher than what we know already.
- * As a safe limit, we return the limit supported by the kernel.
*/
- default: return CONFIG_ARM64_PA_BITS;
+ default: return 52;
}
}
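For reference, a standalone sketch of the full mapping this switch implements (the architected ID_AA64MMFR0_EL1.PARange encodings); the change is confined to the default arm, which now caps unknown, necessarily larger, encodings at 52 instead of a config-dependent limit:

#include <stdint.h>

/* ID_AA64MMFR0_EL1.PARange encodings, per the Arm ARM */
uint32_t parange_to_phys_shift(int parange)
{
	switch (parange) {
	case 0x0: return 32;
	case 0x1: return 36;
	case 0x2: return 40;
	case 0x3: return 42;
	case 0x4: return 44;
	case 0x5: return 48;
	case 0x6: return 52;
	default:  return 52;	/* unknown encodings are larger: cap at 52 */
	}
}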
@@ -30,8 +30,7 @@
static inline u64 kvm_get_parange_max(void)
{
- if (kvm_lpa2_is_enabled() ||
- (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
+ if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16)
return ID_AA64MMFR0_EL1_PARANGE_52;
else
return ID_AA64MMFR0_EL1_PARANGE_48;
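The remaining condition captures the two ways a 52-bit output address can be used: LPA2 enabled at runtime, or 64K pages, where FEAT_LPA alone suffices. As a compact truth table in C (constants written out for illustration):

#include <stdbool.h>

#define PARANGE_48	0x5
#define PARANGE_52	0x6

int parange_max(bool lpa2_enabled, int page_shift)
{
	/* 64K pages (PAGE_SHIFT == 16) reach 52-bit PAs without LPA2 */
	return (lpa2_enabled || page_shift == 16) ? PARANGE_52 : PARANGE_48;
}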
@@ -176,7 +176,6 @@
#define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
-#ifdef CONFIG_ARM64_PA_BITS_52
#ifdef CONFIG_ARM64_64K_PAGES
#define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12)
#define PTE_ADDR_HIGH_SHIFT 36
@@ -186,7 +185,6 @@
#define PTE_ADDR_HIGH_SHIFT 42
#define PHYS_TO_PTE_ADDR_MASK GENMASK_ULL(49, 8)
#endif
-#endif
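The surviving layout is: 64K pages keep PA[51:48] in pte[15:12] (hence shift 36), while 4K/16K LPA2 pages keep PA[51:50] in pte[9:8] (hence shift 42). A worked check of the 4K/16K fold, with the mask written out:

#include <assert.h>
#include <stdint.h>

#define PTE_ADDR_HIGH_SHIFT	42
#define PHYS_TO_PTE_ADDR_MASK	(((1ULL << 42) - 1) << 8)	/* GENMASK_ULL(49, 8) */

int main(void)
{
	/* PA 0xC_0000_1234_5000: PA[51:50] = 0b11, 4K aligned */
	uint64_t phys = (0x3ULL << 50) | 0x12345000;
	uint64_t pte  = (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) &
			PHYS_TO_PTE_ADDR_MASK;

	/* PA[51:50] fold down into pte[9:8] */
	assert(pte == (0x12345000ULL | 0x300));
	return 0;
}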
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -327,12 +325,10 @@
/*
* TTBR.
*/
-#ifdef CONFIG_ARM64_PA_BITS_52
/*
- * TTBR_ELx[1] is RES0 in this configuration.
+ * TTBR_ELx[1] is RES0 when using 52-bit physical addressing.
*/
#define TTBR_BADDR_MASK_52 GENMASK_ULL(47, 2)
-#endif
#ifdef CONFIG_ARM64_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
@@ -81,7 +81,7 @@ extern unsigned long prot_ns_shared;
#define lpa2_is_enabled() false
#define PTE_MAYBE_SHARED PTE_SHARED
#define PMD_MAYBE_SHARED PMD_SECT_S
-#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
+#define PHYS_MASK_SHIFT (52)
#else
static inline bool __pure lpa2_is_enabled(void)
{
@@ -90,7 +90,7 @@ static inline bool __pure lpa2_is_enabled(void)
#define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED)
#define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S)
-#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
+#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? 52 : 48)
#endif
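In the first branch LPA2 is out of the picture and the limit becomes a constant 52; in the second it is decided at runtime. The derived mask follows the usual pattern; a minimal sketch with lpa2_is_enabled() stubbed for illustration:

#include <stdbool.h>
#include <stdint.h>

static bool lpa2_is_enabled(void) { return false; }	/* stub */

#define PHYS_MASK_SHIFT	(lpa2_is_enabled() ? 52 : 48)
#define PHYS_MASK	((1ULL << PHYS_MASK_SHIFT) - 1)

int main(void)
{
	/* with the stub above, the mask covers a 48-bit address space */
	return PHYS_MASK == (1ULL << 48) - 1 ? 0 : 1;
}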
/*
@@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
/*
- * Macros to convert between a physical address and its placement in a
+ * Helpers to convert between a physical address and its placement in a
* page table entry, taking care of 52-bit addresses.
*/
-#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
pte_val(pte) &= ~PTE_MAYBE_SHARED;
@@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
-#else
-#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW)
-#define __phys_to_pte_val(phys) (phys)
-#endif
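A standalone round-trip check of the now-unconditional helpers, instantiated for the 64K-page layout (PTE_ADDR_LOW spans bits [49:16] per the hwdef definition above; the 64K PHYS_TO_PTE_ADDR_MASK is assumed to be PTE_ADDR_LOW | PTE_ADDR_HIGH as in the hwdef header, and the PTE_MAYBE_SHARED masking is dropped for brevity):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT		16	/* 64K pages */
#define PTE_ADDR_LOW		(((1ULL << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#define PTE_ADDR_HIGH		(0xFULL << 12)
#define PTE_ADDR_HIGH_SHIFT	36
#define PHYS_TO_PTE_ADDR_MASK	(PTE_ADDR_LOW | PTE_ADDR_HIGH)

uint64_t phys_to_pte_val(uint64_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}

uint64_t pte_to_phys(uint64_t pte)
{
	return (pte & PTE_ADDR_LOW) |
	       ((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}

int main(void)
{
	/* PA 0x9_0000_ABCD_0000: 52-bit, 64K aligned */
	uint64_t phys = (0x9ULL << 48) | 0xABCD0000;

	assert(pte_to_phys(phys_to_pte_val(phys)) == phys);
	return 0;
}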
#define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) \
@@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
-#else
-#define phys_to_ttbr(addr) (addr)
-#endif
/*
* On arm64 without hardware Access Flag, copying from user will fail because
@@ -916,12 +916,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
@@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
void __init pgtable_cache_init(void)
{
+ unsigned int pgd_size = PGD_SIZE;
+
if (pgdir_is_page_size())
return;
-#ifdef CONFIG_ARM64_PA_BITS_52
/*
* With 52-bit physical addresses, the architecture requires the
* top-level table to be aligned to at least 64 bytes.
*/
- BUILD_BUG_ON(PGD_SIZE < 64);
-#endif
+ if (PHYS_MASK_SHIFT >= 52)
+ pgd_size = max(pgd_size, 64);
/*
* Naturally aligned pgds required by the architecture.
*/
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size,
SLAB_PANIC, NULL);
}
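The 64-byte floor exists because baddr[5:2] of a 52-bit TTBR carry PA[51:48]: a top-level table whose address has any of bits [5:2] set would have those bits reinterpreted as high PA bits. A standalone demonstration (phys_to_ttbr as modelled earlier, plus its inverse):

#include <assert.h>
#include <stdint.h>

#define TTBR_BADDR_MASK_52	(((1ULL << 46) - 1) << 2)

uint64_t phys_to_ttbr(uint64_t phys)
{
	return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
}

/* What the MMU effectively reads back: baddr[5:2] become PA[51:48]. */
uint64_t ttbr_to_phys(uint64_t ttbr)
{
	return (ttbr & ~0x3CULL) | ((ttbr & 0x3CULL) << 46);
}

int main(void)
{
	uint64_t aligned    = (0x5ULL << 48) | 0x40;	/* 64-byte aligned */
	uint64_t misaligned = aligned | 0x20;		/* 32-byte aligned only */

	assert(ttbr_to_phys(phys_to_ttbr(aligned)) == aligned);
	/* bit 5 of a misaligned table would be read back as PA[51] */
	assert(ttbr_to_phys(phys_to_ttbr(misaligned)) != misaligned);
	return 0;
}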
@@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
.macro pte_to_phys, phys, pte
and \phys, \pte, #PTE_ADDR_LOW
-#ifdef CONFIG_ARM64_PA_BITS_52
and \pte, \pte, #PTE_ADDR_HIGH
orr \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
-#endif
.endm
.macro kpti_mk_tbl_ng, type, num_entries
@@ -141,7 +141,6 @@ LX_CONFIG(CONFIG_ARM64_4K_PAGES)
LX_CONFIG(CONFIG_ARM64_16K_PAGES)
LX_CONFIG(CONFIG_ARM64_64K_PAGES)
if IS_BUILTIN(CONFIG_ARM64):
- LX_VALUE(CONFIG_ARM64_PA_BITS)
LX_VALUE(CONFIG_ARM64_VA_BITS)
LX_VALUE(CONFIG_PAGE_SHIFT)
LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
@@ -574,12 +574,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN