@@ -31,7 +31,7 @@
* (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
*/
#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
-#define KASAN_SHADOW_START _KASAN_SHADOW_START(VA_BITS)
+#define KASAN_SHADOW_START _KASAN_SHADOW_START(VA_BITS_ACTUAL)
void kasan_init(void);
void kasan_copy_shadow(pgd_t *pgdir);
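As an aside, the shadow-start arithmetic above is easy to sanity-check outside the kernel. A minimal userspace sketch follows; the scale shift of 3 matches generic KASAN, but the all-ones shadow end is purely a placeholder, not the kernel's configured value:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values for illustration: 3 matches generic KASAN's
 * KASAN_SHADOW_SCALE_SHIFT; the shadow-end address is made up. */
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_END 0xffffffffffffffffULL

/* Mirrors _KASAN_SHADOW_START(va): one shadow byte covers
 * 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory, so va bits of address
 * space need 2^(va - KASAN_SHADOW_SCALE_SHIFT) bytes of shadow,
 * placed immediately below KASAN_SHADOW_END. */
static uint64_t kasan_shadow_start(uint64_t va)
{
	return KASAN_SHADOW_END - (1ULL << (va - KASAN_SHADOW_SCALE_SHIFT));
}

int main(void)
{
	/* With VA_BITS_ACTUAL resolved at boot, the start now moves: */
	printf("48-bit VAs: shadow starts at 0x%016llx\n",
	       (unsigned long long)kasan_shadow_start(48));
	printf("52-bit VAs: shadow starts at 0x%016llx\n",
	       (unsigned long long)kasan_shadow_start(52));
	return 0;
}

The output makes the motivation visible: the start of the shadow region depends on the detected VA size, so the compile-time VA_BITS no longer suffices.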
@@ -48,10 +48,6 @@
* VA_START - the first kernel virtual address.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
-#define VA_START (UL(0xffffffffffffffff) - \
- (UL(1) << (VA_BITS - 1)) + 1)
-#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
- (UL(1) << VA_BITS) + 1)
#define PAGE_OFFSET_END (VA_START)
#define KIMAGE_VADDR (MODULES_END)
#define BPF_JIT_REGION_START (KASAN_SHADOW_END)
@@ -178,10 +174,17 @@
#endif
#ifndef __ASSEMBLY__
+extern u64 vabits_actual;
+#define VA_BITS_ACTUAL ({vabits_actual;})
+#define VA_START (_VA_START(VA_BITS_ACTUAL))
+#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
+ (UL(1) << VA_BITS_ACTUAL) + 1)
+#define PAGE_OFFSET_END (VA_START)
#include <linux/bitops.h>
#include <linux/mmdebug.h>
+extern s64 physvirt_offset;
extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -248,9 +251,9 @@ extern u64 vabits_user;
* space. Testing the top bit for the start of the region is a
* sufficient check.
*/
-#define __is_lm_address(addr) (!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr) (!((addr) & BIT(VA_BITS_ACTUAL - 1)))
-#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr) (((addr) + physvirt_offset))
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
@@ -269,7 +272,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
#endif
-#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
/*
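The __is_lm_address()/__lm_to_phys() pair above relies on the flipped kernel VA layout: the linear map occupies the lower half of the kernel VA range, so a single top-bit test identifies it, after which __lm_to_phys() is one add of physvirt_offset. A hedged userspace sketch of the top-bit test, with invented addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

/* Mirrors __is_lm_address(): in the flipped kernel VA space the linear
 * map is the lower half, so bit (va_bits - 1) is clear for linear-map
 * addresses and set for everything above (kimage, vmalloc, modules). */
static bool is_lm_address(uint64_t addr, unsigned int va_bits)
{
	return !(addr & BIT(va_bits - 1));
}

int main(void)
{
	unsigned int va_bits = 48;	/* stands in for VA_BITS_ACTUAL */

	/* Example addresses are invented for illustration. */
	printf("%d\n", is_lm_address(0xffff000011223344ULL, va_bits)); /* 1 */
	printf("%d\n", is_lm_address(0xffff911223344556ULL, va_bits)); /* 0 */
	return 0;
}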
@@ -106,7 +106,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
isb();
}
-#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS_ACTUAL))
#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
/*
@@ -332,6 +332,11 @@ __create_page_tables:
dmb sy
dc ivac, x6 // Invalidate potentially stale cache line
+ adr_l x6, vabits_actual
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
/*
* VA_BITS may be too small to allow for an ID mapping to be created
* that covers system RAM if that is located sufficiently high in the
@@ -40,25 +40,25 @@ static void compute_layout(void)
int kva_msb;
/* Where is my RAM region? */
- hyp_va_msb = idmap_addr & BIT(VA_BITS - 1);
- hyp_va_msb ^= BIT(VA_BITS - 1);
+ hyp_va_msb = idmap_addr & BIT(VA_BITS_ACTUAL - 1);
+ hyp_va_msb ^= BIT(VA_BITS_ACTUAL - 1);
kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
(u64)(high_memory - 1));
- if (kva_msb == (VA_BITS - 1)) {
+ if (kva_msb == (VA_BITS_ACTUAL - 1)) {
/*
* No space in the address, let's compute the mask so
- * that it covers (VA_BITS - 1) bits, and the region
+ * that it covers (VA_BITS_ACTUAL - 1) bits, and the region
* bit. The tag stays set to zero.
*/
- va_mask = BIT(VA_BITS - 1) - 1;
+ va_mask = BIT(VA_BITS_ACTUAL - 1) - 1;
va_mask |= hyp_va_msb;
} else {
/*
* We do have some free bits to insert a random tag.
* Hyp VAs are now created from kernel linear map VAs
- * using the following formula (with V == VA_BITS):
+ * using the following formula (with V == VA_BITS_ACTUAL):
*
* 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
* ---------------------------------------------------------
@@ -66,7 +66,7 @@ static void compute_layout(void)
*/
tag_lsb = kva_msb;
va_mask = GENMASK_ULL(tag_lsb - 1, 0);
- tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
+ tag_val = get_random_long() & GENMASK_ULL(VA_BITS_ACTUAL - 2, tag_lsb);
tag_val |= hyp_va_msb;
tag_val >>= tag_lsb;
}
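To see the tag construction concretely, here is a condensed, deterministic model of the else-branch above; the idmap address, the kva_msb value and the "random" tag are all invented for illustration, and BIT()/GENMASK_ULL() are re-derived locally:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1ULL << (n))
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t va_bits = 48;			/* stands in for VA_BITS_ACTUAL */
	uint64_t idmap_addr = 0x80000000ULL;	/* invented RAM start */
	int kva_msb = 40;			/* invented fls64() result */
	uint64_t hyp_va_msb, va_mask, tag_val;
	int tag_lsb;

	/* Flip the region bit relative to the kernel's RAM region. */
	hyp_va_msb = idmap_addr & BIT(va_bits - 1);
	hyp_va_msb ^= BIT(va_bits - 1);

	/* kva_msb < va_bits - 1, so free bits exist; insert a tag
	 * (fixed here so the example is reproducible). */
	tag_lsb = kva_msb;
	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = 0x123456789abcdef0ULL & GENMASK_ULL(va_bits - 2, tag_lsb);
	tag_val |= hyp_va_msb;
	tag_val >>= tag_lsb;

	/* A kernel linear-map VA becomes a hyp VA by keeping the low
	 * tag_lsb bits and substituting the tag above them. */
	uint64_t kva = 0xffff00000badf00dULL;	/* invented linear-map VA */
	uint64_t hyp_va = (kva & va_mask) | (tag_val << tag_lsb);

	printf("va_mask=%016llx tag=%llx\n",
	       (unsigned long long)va_mask, (unsigned long long)tag_val);
	printf("kva=%016llx -> hyp_va=%016llx\n",
	       (unsigned long long)kva, (unsigned long long)hyp_va);
	return 0;
}

With these inputs the sketch keeps the low 40 bits of the kernel VA and places the tag, including the flipped region bit, above them, which is exactly the layout drawn in the comment.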
@@ -171,9 +171,9 @@ void show_pte(unsigned long addr)
return;
}
- pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
+ pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp = %p\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
- mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
+ mm == &init_mm ? VA_BITS_ACTUAL : vabits_user, mm->pgd);
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
@@ -61,6 +61,9 @@
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
+s64 physvirt_offset __ro_after_init;
+EXPORT_SYMBOL(physvirt_offset);
+
phys_addr_t arm64_dma_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
@@ -354,7 +357,7 @@ static void __init fdt_enforce_memory_region(void)
void __init arm64_memblock_init(void)
{
- const s64 linear_region_size = BIT(VA_BITS - 1);
+ const s64 linear_region_size = BIT(VA_BITS_ACTUAL - 1);
/* Handle linux,usable-memory-range property */
fdt_enforce_memory_region();
@@ -368,6 +371,8 @@ void __init arm64_memblock_init(void)
memstart_addr = round_down(memblock_start_of_DRAM(),
ARM64_MEMSTART_ALIGN);
+ physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
+
/*
* Remove the memory that we will not be able to cover with the
* linear mapping. Take care not to clip the kernel which may be
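The physvirt_offset assignment above is what keeps __phys_to_virt() a single subtraction. A sketch of the identity, with invented layout values standing in for the boot-time quantities:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Invented stand-ins for the boot-time quantities. */
	uint64_t phys_offset = 0x80000000ULL;		/* PHYS_OFFSET */
	uint64_t page_offset = 0xffff000000000000ULL;	/* PAGE_OFFSET, 48-bit VAs */
	int64_t physvirt_offset = phys_offset - page_offset;

	uint64_t phys = phys_offset + 0x8badf00dULL;	/* some RAM address */

	/* New form: one subtraction of a runtime variable... */
	uint64_t virt_new = phys - physvirt_offset;
	/* ...equals the old subtract-then-OR form, as long as the
	 * address lies within the linear region. */
	uint64_t virt_old = (phys - phys_offset) | page_offset;

	assert(virt_new == virt_old);
	return 0;
}

Folding PHYS_OFFSET and PAGE_OFFSET into one runtime variable means the hot translation path pays one load and one subtraction, instead of separate arithmetic against a PAGE_OFFSET that is no longer a compile-time constant.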
@@ -55,6 +55,9 @@ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 vabits_user __ro_after_init;
EXPORT_SYMBOL(vabits_user);
+u64 __section(".mmuoff.data.write") vabits_actual;
+EXPORT_SYMBOL(vabits_actual);
+
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
In order to support 52-bit kernel addresses detectable at boot time, one
needs to know the actual VA_BITS detected. A new macro, VA_BITS_ACTUAL
(backed by the boot-time variable vabits_actual), is introduced in this
commit and employed for the KVM hypervisor layout, KASAN, fault handling
and phys-to/from-virt translation where there would normally be
compile-time constants.

In order to maintain performance in phys_to_virt, another variable,
physvirt_offset, is introduced.

Signed-off-by: Steve Capper <steve.capper@arm.com>
---
 arch/arm64/include/asm/kasan.h       |  2 +-
 arch/arm64/include/asm/memory.h      | 17 ++++++++++-------
 arch/arm64/include/asm/mmu_context.h |  2 +-
 arch/arm64/kernel/head.S             |  5 +++++
 arch/arm64/kvm/va_layout.c           | 14 +++++++-------
 arch/arm64/mm/fault.c                |  4 ++--
 arch/arm64/mm/init.c                 |  7 ++++++-
 arch/arm64/mm/mmu.c                  |  3 +++
 8 files changed, 35 insertions(+), 19 deletions(-)
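As a note below the fold: the net effect is that layout quantities that used to be compile-time constants become functions of vabits_actual. A small model of the two configurations of interest follows; the _VA_START() body here is an assumption, written by analogy with the PAGE_OFFSET definition in this patch (it is introduced by an earlier patch in the series):

#include <stdint.h>
#include <stdio.h>

/* Model of the layout macros after this patch: both now depend on the
 * boot-time vabits_actual instead of CONFIG_ARM64_VA_BITS. _VA_START()
 * is assumed, by analogy with the PAGE_OFFSET definition above. */
static uint64_t vabits_actual;

#define _VA_START(va)	(0xffffffffffffffffULL - (1ULL << ((va) - 1)) + 1)
#define VA_START	_VA_START(vabits_actual)
#define PAGE_OFFSET	(0xffffffffffffffffULL - (1ULL << vabits_actual) + 1)

int main(void)
{
	for (vabits_actual = 48; vabits_actual <= 52; vabits_actual += 4) {
		printf("vabits_actual=%llu VA_START=%016llx PAGE_OFFSET=%016llx\n",
		       (unsigned long long)vabits_actual,
		       (unsigned long long)VA_START,
		       (unsigned long long)PAGE_OFFSET);
	}
	return 0;
}

This prints VA_START=ffff800000000000 and PAGE_OFFSET=ffff000000000000 for 48-bit VAs, and fff8000000000000/fff0000000000000 for 52-bit VAs.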