@@ -23,8 +23,6 @@
#define VMALLOC_START (MODULES_END)
#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

-#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
-
#define FIRST_USER_ADDRESS 0UL

#ifndef __ASSEMBLY__
@@ -35,6 +33,8 @@
#include <linux/mm_types.h>
#include <linux/sched.h>

+extern struct page *vmemmap;
+
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(memstart_addr);
s64 physvirt_offset __ro_after_init;
EXPORT_SYMBOL(physvirt_offset);

+struct page *vmemmap __ro_after_init;
+EXPORT_SYMBOL(vmemmap);
+
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
@@ -320,6 +323,8 @@ void __init arm64_memblock_init(void)

	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;

+	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
+
	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be