@@ -1,4 +1,5 @@
obj-y += lib/
+obj-$(CONFIG_MMU) += mmu/
obj-$(CONFIG_EARLY_PRINTK) += debug.o
obj-y += domctl.o
new file mode 100644
@@ -0,0 +1 @@
+obj-y += mm.o
new file mode 100644
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <xen/init.h>
+#include <asm/fixmap.h>
+
+/*
+ * Set up the direct-mapped xenheap:
+ * up to 1GB of contiguous, always-mapped memory.
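+ * Base must be 32MB aligned and size a multiple of 32MB.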
+ */
+void __init setup_directmap_mappings(unsigned long base_mfn,
+ unsigned long nr_mfns)
+{
+ int rc;
+
+ rc = map_pages_to_xen(XENHEAP_VIRT_START, _mfn(base_mfn), nr_mfns,
+ PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+ if ( rc )
+ panic("Unable to setup the directmap mappings.\n");
+
+ /* Record where the directmap is, for translation routines. */
+ directmap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -11,7 +11,6 @@ obj-y += entry.o
obj-y += head.o
obj-y += insn.o
obj-$(CONFIG_LIVEPATCH) += livepatch.o
-obj-y += mm.o
obj-y += smc.o
obj-y += smpboot.o
obj-$(CONFIG_ARM64_SVE) += sve.o sve-asm.o
@@ -1 +1,2 @@
obj-y += head.o
+obj-y += mm.o
similarity index 75%
rename from xen/arch/arm/arm64/mm.c
rename to xen/arch/arm/arm64/mmu/mm.c
@@ -151,6 +151,48 @@ void __init switch_ttbr(uint64_t ttbr)
update_identity_mapping(false);
}
+/* Map the region in the directmap area. */
+void __init setup_directmap_mappings(unsigned long base_mfn,
+ unsigned long nr_mfns)
+{
+ int rc;
+
+ /* First call sets the directmap physical and virtual offset. */
+ if ( mfn_eq(directmap_mfn_start, INVALID_MFN) )
+ {
+ unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
+
+ directmap_mfn_start = _mfn(base_mfn);
+ directmap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
+ /*
+ * The base address may not be aligned to the first level
+ * size (e.g. 1GB when using 4KB pages). This would prevent
+ * superpage mappings for all the regions because the virtual
+ * address and machine address should both be suitably aligned.
+ *
+ * Prevent that by offsetting the start of the directmap virtual
+ * address.
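+ *
+ * Illustrative example (assuming 4KB pages, so FIRST_SIZE covers
+ * 0x40000 pages): base_mfn = 0x80200 gives mfn_gb = 0x80000, making
+ * the directmap start 0x200 pages (2MB) above DIRECTMAP_VIRT_START
+ * while keeping 1GB-aligned machine addresses 1GB-aligned virtually.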
+ */
+ directmap_virt_start = DIRECTMAP_VIRT_START +
+ (base_mfn - mfn_gb) * PAGE_SIZE;
+ }
+
+ if ( base_mfn < mfn_x(directmap_mfn_start) )
+ panic("cannot add directmap mapping at %lx below heap start %lx\n",
+ base_mfn, mfn_x(directmap_mfn_start));
+
+ rc = map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn),
+ _mfn(base_mfn), nr_mfns,
+ PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+ if ( rc )
+ panic("Unable to setup the directmap mappings.\n");
+}
+
/*
* Local variables:
* mode: C
@@ -14,6 +14,12 @@
# error "unknown ARM variant"
#endif
+#if defined(CONFIG_MMU)
+# include <asm/mmu/mm.h>
+#else
+# error "Unknown memory management layout"
+#endif
+
/* Align Xen to a 2 MiB boundary. */
#define XEN_PADDR_ALIGN (1 << 21)
@@ -165,16 +171,6 @@ struct page_info
#define _PGC_need_scrub _PGC_allocated
#define PGC_need_scrub PGC_allocated
-/* Non-boot CPUs use this to find the correct pagetables. */
-extern uint64_t init_ttbr;
-
-extern mfn_t directmap_mfn_start, directmap_mfn_end;
-extern vaddr_t directmap_virt_end;
-#ifdef CONFIG_ARM_64
-extern vaddr_t directmap_virt_start;
-extern unsigned long directmap_base_pdx;
-#endif
-
#ifdef CONFIG_ARM_32
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({ \
@@ -197,7 +193,6 @@ extern unsigned long directmap_base_pdx;
#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))
-#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
/* PDX of the first page in the frame table. */
extern unsigned long frametable_base_pdx;
@@ -207,19 +202,11 @@ extern unsigned long frametable_base_pdx;
extern void setup_pagetables(unsigned long boot_phys_offset);
/* Map FDT in boot pagetable */
extern void *early_fdt_map(paddr_t fdt_paddr);
-/* Switch to a new root page-tables */
-extern void switch_ttbr(uint64_t ttbr);
/* Remove early mappings */
extern void remove_early_mappings(void);
/* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to the
* new page table */
extern int init_secondary_pagetables(int cpu);
-/*
- * For Arm32, set up the direct-mapped xenheap: up to 1GB of contiguous,
- * always-mapped memory. Base must be 32MB aligned and size a multiple of 32MB.
- * For Arm64, map the region in the directmap area.
- */
-extern void setup_directmap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
/* Map a frame table to cover physical addresses ps through pe */
extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
/* map a physical range in virtual memory */
new file mode 100644
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ARM_MMU_MM_H__
+#define __ARM_MMU_MM_H__
+
+/* Non-boot CPUs use this to find the correct pagetables. */
+extern uint64_t init_ttbr;
+
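+/* Limits of the Xen heap */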
+extern mfn_t directmap_mfn_start, directmap_mfn_end;
+extern vaddr_t directmap_virt_end;
+#ifdef CONFIG_ARM_64
+extern vaddr_t directmap_virt_start;
+extern unsigned long directmap_base_pdx;
+#endif
+
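+/* The frame table is mapped at FRAMETABLE_VIRT_START. */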
+#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
+
+/*
+ * Print a walk of a page table or p2m
+ *
+ * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2)
+ * addr is the PA or IPA to translate
+ * root_level is the starting level of the page table
+ * (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0)
+ * nr_root_tables is the number of concatenated tables at the root;
+ * this can only be != 1 for P2M walks starting at the first or
+ * subsequent level.
+ */
+void dump_pt_walk(paddr_t ttbr, paddr_t addr,
+ unsigned int root_level,
+ unsigned int nr_root_tables);
+
+/* Switch to new root page-tables */
+extern void switch_ttbr(uint64_t ttbr);
+/*
+ * For Arm32, set up the direct-mapped xenheap: up to 1GB of contiguous,
+ * always-mapped memory. Base must be 32MB aligned and size a multiple of 32MB.
+ * For Arm64, map the region in the directmap area.
+ */
+extern void setup_directmap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
+
+#endif /* __ARM_MMU_MM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -264,21 +264,6 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn, bool sync_icache);
-/*
- * Print a walk of a page table or p2m
- *
- * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2)
- * addr is the PA or IPA to translate
- * root_level is the starting level of the page table
- * (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0 )
- * nr_root_tables is the number of concatenated tables at the root.
- * this can only be != 1 for P2M walks starting at the first or
- * subsequent level.
- */
-void dump_pt_walk(paddr_t ttbr, paddr_t addr,
- unsigned int root_level,
- unsigned int nr_root_tables);
-
/* Print a walk of the hypervisor's page tables for a virtual addr. */
extern void dump_hyp_walk(vaddr_t addr);
/* Print a walk of the p2m for a domain for a physical address. */
@@ -11,136 +11,19 @@
#include <xen/domain_page.h>
#include <xen/grant_table.h>
#include <xen/guest_access.h>
-#include <xen/libfdt/libfdt.h>
#include <xen/mm.h>
-#include <xen/sizes.h>
#include <xsm/xsm.h>
-#include <asm/setup.h>
-
#include <public/memory.h>
/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef mfn_to_virt
-#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
-
-/* Main runtime page tables */
-
-/*
- * For arm32 xen_pgtable are per-PCPU and are allocated before
- * bringing up each CPU. For arm64 xen_pgtable is common to all PCPUs.
- *
- * xen_second, xen_fixmap and xen_xenmap are always shared between all
- * PCPUs.
- */
-
-#ifdef CONFIG_ARM_64
-DEFINE_PAGE_TABLE(xen_pgtable);
-static DEFINE_PAGE_TABLE(xen_first);
-#define THIS_CPU_PGTABLE xen_pgtable
-#else
-/* Per-CPU pagetable pages */
-/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
-DEFINE_PER_CPU(lpae_t *, xen_pgtable);
-#define THIS_CPU_PGTABLE this_cpu(xen_pgtable)
-/* Root of the trie for cpu0, other CPU's PTs are dynamically allocated */
-static DEFINE_PAGE_TABLE(cpu0_pgtable);
-#endif
-
-/* Common pagetable leaves */
-/* Second level page table used to cover Xen virtual address space */
-static DEFINE_PAGE_TABLE(xen_second);
-/* Third level page table used for fixmap */
-DEFINE_BOOT_PAGE_TABLE(xen_fixmap);
-/*
- * Third level page table used to map Xen itself with the XN bit set
- * as appropriate.
- */
-static DEFINE_PAGE_TABLES(xen_xenmap, XEN_NR_ENTRIES(2));
-
-static paddr_t phys_offset;
-
-/* Limits of the Xen heap */
-mfn_t directmap_mfn_start __read_mostly = INVALID_MFN_INITIALIZER;
-mfn_t directmap_mfn_end __read_mostly;
-vaddr_t directmap_virt_end __read_mostly;
-#ifdef CONFIG_ARM_64
-vaddr_t directmap_virt_start __read_mostly;
-unsigned long directmap_base_pdx __read_mostly;
-#endif
unsigned long frametable_base_pdx __read_mostly;
unsigned long frametable_virt_end __read_mostly;
-extern char __init_begin[], __init_end[];
-
-/* Checking VA memory layout alignment. */
-static void __init __maybe_unused build_assertions(void)
-{
- /* 2MB aligned regions */
- BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK);
- BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK);
- /* 1GB aligned regions */
-#ifdef CONFIG_ARM_32
- BUILD_BUG_ON(XENHEAP_VIRT_START & ~FIRST_MASK);
-#else
- BUILD_BUG_ON(DIRECTMAP_VIRT_START & ~FIRST_MASK);
-#endif
- /* Page table structure constraints */
-#ifdef CONFIG_ARM_64
- /*
- * The first few slots of the L0 table is reserved for the identity
- * mapping. Check that none of the other regions are overlapping
- * with it.
- */
-#define CHECK_OVERLAP_WITH_IDMAP(virt) \
- BUILD_BUG_ON(zeroeth_table_offset(virt) < IDENTITY_MAPPING_AREA_NR_L0)
-
- CHECK_OVERLAP_WITH_IDMAP(XEN_VIRT_START);
- CHECK_OVERLAP_WITH_IDMAP(VMAP_VIRT_START);
- CHECK_OVERLAP_WITH_IDMAP(FRAMETABLE_VIRT_START);
- CHECK_OVERLAP_WITH_IDMAP(DIRECTMAP_VIRT_START);
-#undef CHECK_OVERLAP_WITH_IDMAP
-#endif
- BUILD_BUG_ON(first_table_offset(XEN_VIRT_START));
-#ifdef CONFIG_ARCH_MAP_DOMAIN_PAGE
- BUILD_BUG_ON(DOMHEAP_VIRT_START & ~FIRST_MASK);
-#endif
- /*
- * The boot code expects the regions XEN_VIRT_START, FIXMAP_ADDR(0),
- * BOOT_FDT_VIRT_START to use the same 0th (arm64 only) and 1st
- * slot in the page tables.
- */
-#define CHECK_SAME_SLOT(level, virt1, virt2) \
- BUILD_BUG_ON(level##_table_offset(virt1) != level##_table_offset(virt2))
-
-#define CHECK_DIFFERENT_SLOT(level, virt1, virt2) \
- BUILD_BUG_ON(level##_table_offset(virt1) == level##_table_offset(virt2))
-
-#ifdef CONFIG_ARM_64
- CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, FIXMAP_ADDR(0));
- CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, BOOT_FDT_VIRT_START);
-#endif
- CHECK_SAME_SLOT(first, XEN_VIRT_START, FIXMAP_ADDR(0));
- CHECK_SAME_SLOT(first, XEN_VIRT_START, BOOT_FDT_VIRT_START);
-
- /*
- * For arm32, the temporary mapping will re-use the domheap
- * first slot and the second slots will match.
- */
-#ifdef CONFIG_ARM_32
- CHECK_SAME_SLOT(first, TEMPORARY_XEN_VIRT_START, DOMHEAP_VIRT_START);
- CHECK_DIFFERENT_SLOT(first, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
- CHECK_SAME_SLOT(second, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
-#endif
-
-#undef CHECK_SAME_SLOT
-#undef CHECK_DIFFERENT_SLOT
-}
-
void flush_page_to_ram(unsigned long mfn, bool sync_icache)
{
void *v = map_domain_page(_mfn(mfn));
@@ -160,229 +43,6 @@ void flush_page_to_ram(unsigned long mfn, bool sync_icache)
invalidate_icache();
}
-lpae_t pte_of_xenaddr(vaddr_t va)
-{
- paddr_t ma = va + phys_offset;
-
- return mfn_to_xen_entry(maddr_to_mfn(ma), MT_NORMAL);
-}
-
-void * __init early_fdt_map(paddr_t fdt_paddr)
-{
- /* We are using 2MB superpage for mapping the FDT */
- paddr_t base_paddr = fdt_paddr & SECOND_MASK;
- paddr_t offset;
- void *fdt_virt;
- uint32_t size;
- int rc;
-
- /*
- * Check whether the physical FDT address is set and meets the minimum
- * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be at
- * least 8 bytes so that we always access the magic and size fields
- * of the FDT header after mapping the first chunk, double check if
- * that is indeed the case.
- */
- BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
- if ( !fdt_paddr || fdt_paddr % MIN_FDT_ALIGN )
- return NULL;
-
- /* The FDT is mapped using 2MB superpage */
- BUILD_BUG_ON(BOOT_FDT_VIRT_START % SZ_2M);
-
- rc = map_pages_to_xen(BOOT_FDT_VIRT_START, maddr_to_mfn(base_paddr),
- SZ_2M >> PAGE_SHIFT,
- PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
- if ( rc )
- panic("Unable to map the device-tree.\n");
-
-
- offset = fdt_paddr % SECOND_SIZE;
- fdt_virt = (void *)BOOT_FDT_VIRT_START + offset;
-
- if ( fdt_magic(fdt_virt) != FDT_MAGIC )
- return NULL;
-
- size = fdt_totalsize(fdt_virt);
- if ( size > MAX_FDT_SIZE )
- return NULL;
-
- if ( (offset + size) > SZ_2M )
- {
- rc = map_pages_to_xen(BOOT_FDT_VIRT_START + SZ_2M,
- maddr_to_mfn(base_paddr + SZ_2M),
- SZ_2M >> PAGE_SHIFT,
- PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
- if ( rc )
- panic("Unable to map the device-tree\n");
- }
-
- return fdt_virt;
-}
-
-void __init remove_early_mappings(void)
-{
- int rc;
-
- /* destroy the _PAGE_BLOCK mapping */
- rc = modify_xen_mappings(BOOT_FDT_VIRT_START,
- BOOT_FDT_VIRT_START + BOOT_FDT_VIRT_SIZE,
- _PAGE_BLOCK);
- BUG_ON(rc);
-}
-
-/*
- * After boot, Xen page-tables should not contain mapping that are both
- * Writable and eXecutables.
- *
- * This should be called on each CPU to enforce the policy.
- */
-static void xen_pt_enforce_wnx(void)
-{
- WRITE_SYSREG(READ_SYSREG(SCTLR_EL2) | SCTLR_Axx_ELx_WXN, SCTLR_EL2);
- /*
- * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
- * before flushing the TLBs.
- */
- isb();
- flush_xen_tlb_local();
-}
-
-/* Boot-time pagetable setup.
- * Changes here may need matching changes in head.S */
-void __init setup_pagetables(unsigned long boot_phys_offset)
-{
- uint64_t ttbr;
- lpae_t pte, *p;
- int i;
-
- phys_offset = boot_phys_offset;
-
- arch_setup_page_tables();
-
-#ifdef CONFIG_ARM_64
- pte = pte_of_xenaddr((uintptr_t)xen_first);
- pte.pt.table = 1;
- pte.pt.xn = 0;
- xen_pgtable[zeroeth_table_offset(XEN_VIRT_START)] = pte;
-
- p = (void *) xen_first;
-#else
- p = (void *) cpu0_pgtable;
-#endif
-
- /* Map xen second level page-table */
- p[0] = pte_of_xenaddr((uintptr_t)(xen_second));
- p[0].pt.table = 1;
- p[0].pt.xn = 0;
-
- /* Break up the Xen mapping into pages and protect them separately. */
- for ( i = 0; i < XEN_NR_ENTRIES(3); i++ )
- {
- vaddr_t va = XEN_VIRT_START + (i << PAGE_SHIFT);
-
- if ( !is_kernel(va) )
- break;
- pte = pte_of_xenaddr(va);
- pte.pt.table = 1; /* third level mappings always have this bit set */
- if ( is_kernel_text(va) || is_kernel_inittext(va) )
- {
- pte.pt.xn = 0;
- pte.pt.ro = 1;
- }
- if ( is_kernel_rodata(va) )
- pte.pt.ro = 1;
- xen_xenmap[i] = pte;
- }
-
- /* Initialise xen second level entries ... */
- /* ... Xen's text etc */
- for ( i = 0; i < XEN_NR_ENTRIES(2); i++ )
- {
- vaddr_t va = XEN_VIRT_START + (i << XEN_PT_LEVEL_SHIFT(2));
-
- pte = pte_of_xenaddr((vaddr_t)(xen_xenmap + i * XEN_PT_LPAE_ENTRIES));
- pte.pt.table = 1;
- xen_second[second_table_offset(va)] = pte;
- }
-
- /* ... Fixmap */
- pte = pte_of_xenaddr((vaddr_t)xen_fixmap);
- pte.pt.table = 1;
- xen_second[second_table_offset(FIXMAP_ADDR(0))] = pte;
-
-#ifdef CONFIG_ARM_64
- ttbr = (uintptr_t) xen_pgtable + phys_offset;
-#else
- ttbr = (uintptr_t) cpu0_pgtable + phys_offset;
-#endif
-
- switch_ttbr(ttbr);
-
- xen_pt_enforce_wnx();
-
-#ifdef CONFIG_ARM_32
- per_cpu(xen_pgtable, 0) = cpu0_pgtable;
-#endif
-}
-
-#ifdef CONFIG_ARM_32
-/*
- * Set up the direct-mapped xenheap:
- * up to 1GB of contiguous, always-mapped memory.
- */
-void __init setup_directmap_mappings(unsigned long base_mfn,
- unsigned long nr_mfns)
-{
- int rc;
-
- rc = map_pages_to_xen(XENHEAP_VIRT_START, _mfn(base_mfn), nr_mfns,
- PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
- if ( rc )
- panic("Unable to setup the directmap mappings.\n");
-
- /* Record where the directmap is, for translation routines. */
- directmap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
-}
-#else /* CONFIG_ARM_64 */
-/* Map the region in the directmap area. */
-void __init setup_directmap_mappings(unsigned long base_mfn,
- unsigned long nr_mfns)
-{
- int rc;
-
- /* First call sets the directmap physical and virtual offset. */
- if ( mfn_eq(directmap_mfn_start, INVALID_MFN) )
- {
- unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
-
- directmap_mfn_start = _mfn(base_mfn);
- directmap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
- /*
- * The base address may not be aligned to the first level
- * size (e.g. 1GB when using 4KB pages). This would prevent
- * superpage mappings for all the regions because the virtual
- * address and machine address should both be suitably aligned.
- *
- * Prevent that by offsetting the start of the directmap virtual
- * address.
- */
- directmap_virt_start = DIRECTMAP_VIRT_START +
- (base_mfn - mfn_gb) * PAGE_SIZE;
- }
-
- if ( base_mfn < mfn_x(directmap_mfn_start) )
- panic("cannot add directmap mapping at %lx below heap start %lx\n",
- base_mfn, mfn_x(directmap_mfn_start));
-
- rc = map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn),
- _mfn(base_mfn), nr_mfns,
- PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
- if ( rc )
- panic("Unable to setup the directmap mappings.\n");
-}
-#endif
-
/* Map a frame table to cover physical addresses ps through pe */
void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
{
@@ -422,51 +82,6 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info));
}
-void *__init arch_vmap_virt_end(void)
-{
- return (void *)(VMAP_VIRT_START + VMAP_VIRT_SIZE);
-}
-
-/* Release all __init and __initdata ranges to be reused */
-void free_init_memory(void)
-{
- paddr_t pa = virt_to_maddr(__init_begin);
- unsigned long len = __init_end - __init_begin;
- uint32_t insn;
- unsigned int i, nr = len / sizeof(insn);
- uint32_t *p;
- int rc;
-
- rc = modify_xen_mappings((unsigned long)__init_begin,
- (unsigned long)__init_end, PAGE_HYPERVISOR_RW);
- if ( rc )
- panic("Unable to map RW the init section (rc = %d)\n", rc);
-
- /*
- * From now on, init will not be used for execution anymore,
- * so nuke the instruction cache to remove entries related to init.
- */
- invalidate_icache_local();
-
-#ifdef CONFIG_ARM_32
- /* udf instruction i.e (see A8.8.247 in ARM DDI 0406C.c) */
- insn = 0xe7f000f0;
-#else
- insn = AARCH64_BREAK_FAULT;
-#endif
- p = (uint32_t *)__init_begin;
- for ( i = 0; i < nr; i++ )
- *(p + i) = insn;
-
- rc = destroy_xen_mappings((unsigned long)__init_begin,
- (unsigned long)__init_end);
- if ( rc )
- panic("Unable to remove the init section (rc = %d)\n", rc);
-
- init_domheap_pages(pa, pa + len);
- printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
-}
-
int steal_page(
struct domain *d, struct page_info *page, unsigned int memflags)
{
@@ -1,2 +1,3 @@
obj-y += pt.o
+obj-y += setup.o
obj-y += smpboot.o
new file mode 100644
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * xen/arch/arm/mmu/setup.c
+ *
+ * Boot CPU MM bringup code for MMU systems.
+ */
+
+#include <xen/init.h>
+#include <xen/libfdt/libfdt.h>
+#include <xen/sizes.h>
+
+#include <asm/fixmap.h>
+
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_virt
+#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
+
+/* Main runtime page tables */
+
+/*
+ * For arm32, xen_pgtable is per-PCPU and is allocated before
+ * bringing up each CPU. For arm64, xen_pgtable is common to all PCPUs.
+ *
+ * xen_second, xen_fixmap and xen_xenmap are always shared between all
+ * PCPUs.
+ */
+
+#ifdef CONFIG_ARM_64
+DEFINE_PAGE_TABLE(xen_pgtable);
+static DEFINE_PAGE_TABLE(xen_first);
+#define THIS_CPU_PGTABLE xen_pgtable
+#else
+/* Per-CPU pagetable pages */
+/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
+DEFINE_PER_CPU(lpae_t *, xen_pgtable);
+#define THIS_CPU_PGTABLE this_cpu(xen_pgtable)
+/* Root of the trie for cpu0, other CPUs' PTs are dynamically allocated */
+static DEFINE_PAGE_TABLE(cpu0_pgtable);
+#endif
+
+/* Common pagetable leaves */
+/* Second level page table used to cover Xen virtual address space */
+static DEFINE_PAGE_TABLE(xen_second);
+/* Third level page table used for fixmap */
+DEFINE_BOOT_PAGE_TABLE(xen_fixmap);
+/*
+ * Third level page table used to map Xen itself with the XN bit set
+ * as appropriate.
+ */
+static DEFINE_PAGE_TABLES(xen_xenmap, XEN_NR_ENTRIES(2));
+
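+/* Offset from Xen's virtual addresses to the corresponding machine addresses. */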
+static paddr_t phys_offset;
+
+/* Limits of the Xen heap */
+mfn_t directmap_mfn_start __read_mostly = INVALID_MFN_INITIALIZER;
+mfn_t directmap_mfn_end __read_mostly;
+vaddr_t directmap_virt_end __read_mostly;
+#ifdef CONFIG_ARM_64
+vaddr_t directmap_virt_start __read_mostly;
+unsigned long directmap_base_pdx __read_mostly;
+#endif
+
+extern char __init_begin[], __init_end[];
+
+/* Checking VA memory layout alignment. */
+static void __init __maybe_unused build_assertions(void)
+{
+ /* 2MB aligned regions */
+ BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK);
+ BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK);
+ /* 1GB aligned regions */
+#ifdef CONFIG_ARM_32
+ BUILD_BUG_ON(XENHEAP_VIRT_START & ~FIRST_MASK);
+#else
+ BUILD_BUG_ON(DIRECTMAP_VIRT_START & ~FIRST_MASK);
+#endif
+ /* Page table structure constraints */
+#ifdef CONFIG_ARM_64
+ /*
+ * The first few slots of the L0 table are reserved for the identity
+ * mapping. Check that none of the other regions are overlapping
+ * with it.
+ */
+#define CHECK_OVERLAP_WITH_IDMAP(virt) \
+ BUILD_BUG_ON(zeroeth_table_offset(virt) < IDENTITY_MAPPING_AREA_NR_L0)
+
+ CHECK_OVERLAP_WITH_IDMAP(XEN_VIRT_START);
+ CHECK_OVERLAP_WITH_IDMAP(VMAP_VIRT_START);
+ CHECK_OVERLAP_WITH_IDMAP(FRAMETABLE_VIRT_START);
+ CHECK_OVERLAP_WITH_IDMAP(DIRECTMAP_VIRT_START);
+#undef CHECK_OVERLAP_WITH_IDMAP
+#endif
+ BUILD_BUG_ON(first_table_offset(XEN_VIRT_START));
+#ifdef CONFIG_ARCH_MAP_DOMAIN_PAGE
+ BUILD_BUG_ON(DOMHEAP_VIRT_START & ~FIRST_MASK);
+#endif
+ /*
+ * The boot code expects the regions XEN_VIRT_START, FIXMAP_ADDR(0),
+ * BOOT_FDT_VIRT_START to use the same 0th (arm64 only) and 1st
+ * slot in the page tables.
+ */
+#define CHECK_SAME_SLOT(level, virt1, virt2) \
+ BUILD_BUG_ON(level##_table_offset(virt1) != level##_table_offset(virt2))
+
+#define CHECK_DIFFERENT_SLOT(level, virt1, virt2) \
+ BUILD_BUG_ON(level##_table_offset(virt1) == level##_table_offset(virt2))
+
+#ifdef CONFIG_ARM_64
+ CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, FIXMAP_ADDR(0));
+ CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, BOOT_FDT_VIRT_START);
+#endif
+ CHECK_SAME_SLOT(first, XEN_VIRT_START, FIXMAP_ADDR(0));
+ CHECK_SAME_SLOT(first, XEN_VIRT_START, BOOT_FDT_VIRT_START);
+
+ /*
+ * For arm32, the temporary mapping will re-use the domheap
+ * first slot and the second slots will match.
+ */
+#ifdef CONFIG_ARM_32
+ CHECK_SAME_SLOT(first, TEMPORARY_XEN_VIRT_START, DOMHEAP_VIRT_START);
+ CHECK_DIFFERENT_SLOT(first, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
+ CHECK_SAME_SLOT(second, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
+#endif
+
+#undef CHECK_SAME_SLOT
+#undef CHECK_DIFFERENT_SLOT
+}
+
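+/*
+ * Compute the PTE for a Xen virtual address: translate the link-time
+ * virtual address to its machine address using phys_offset.
+ */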
+lpae_t __init pte_of_xenaddr(vaddr_t va)
+{
+ paddr_t ma = va + phys_offset;
+
+ return mfn_to_xen_entry(maddr_to_mfn(ma), MT_NORMAL);
+}
+
+void * __init early_fdt_map(paddr_t fdt_paddr)
+{
+ /* We are using a 2MB superpage for mapping the FDT */
+ paddr_t base_paddr = fdt_paddr & SECOND_MASK;
+ paddr_t offset;
+ void *fdt_virt;
+ uint32_t size;
+ int rc;
+
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. We rely on MIN_FDT_ALIGN being at least
+ * 8 bytes so that the magic and size fields of the FDT header are
+ * always accessible after mapping the first chunk; double-check that
+ * this is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if ( !fdt_paddr || fdt_paddr % MIN_FDT_ALIGN )
+ return NULL;
+
+ /* The FDT is mapped using a 2MB superpage */
+ BUILD_BUG_ON(BOOT_FDT_VIRT_START % SZ_2M);
+
+ rc = map_pages_to_xen(BOOT_FDT_VIRT_START, maddr_to_mfn(base_paddr),
+ SZ_2M >> PAGE_SHIFT,
+ PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
+ if ( rc )
+ panic("Unable to map the device-tree.\n");
+
+ offset = fdt_paddr % SECOND_SIZE;
+ fdt_virt = (void *)BOOT_FDT_VIRT_START + offset;
+
+ if ( fdt_magic(fdt_virt) != FDT_MAGIC )
+ return NULL;
+
+ size = fdt_totalsize(fdt_virt);
+ if ( size > MAX_FDT_SIZE )
+ return NULL;
+
+ if ( (offset + size) > SZ_2M )
+ {
+ rc = map_pages_to_xen(BOOT_FDT_VIRT_START + SZ_2M,
+ maddr_to_mfn(base_paddr + SZ_2M),
+ SZ_2M >> PAGE_SHIFT,
+ PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
+ if ( rc )
+ panic("Unable to map the device-tree\n");
+ }
+
+ return fdt_virt;
+}
+
+void __init remove_early_mappings(void)
+{
+ int rc;
+
+ /* destroy the _PAGE_BLOCK mapping */
+ rc = modify_xen_mappings(BOOT_FDT_VIRT_START,
+ BOOT_FDT_VIRT_START + BOOT_FDT_VIRT_SIZE,
+ _PAGE_BLOCK);
+ BUG_ON(rc);
+}
+
+/*
+ * After boot, Xen page-tables should not contain mappings that are both
+ * Writable and eXecutable.
+ *
+ * This should be called on each CPU to enforce the policy.
+ */
+static void xen_pt_enforce_wnx(void)
+{
+ WRITE_SYSREG(READ_SYSREG(SCTLR_EL2) | SCTLR_Axx_ELx_WXN, SCTLR_EL2);
+ /*
+ * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
+ * before flushing the TLBs.
+ */
+ isb();
+ flush_xen_tlb_local();
+}
+
+/*
+ * Boot-time pagetable setup.
+ * Changes here may need matching changes in head.S
+ */
+void __init setup_pagetables(unsigned long boot_phys_offset)
+{
+ uint64_t ttbr;
+ lpae_t pte, *p;
+ int i;
+
+ phys_offset = boot_phys_offset;
+
+ arch_setup_page_tables();
+
+#ifdef CONFIG_ARM_64
+ pte = pte_of_xenaddr((uintptr_t)xen_first);
+ pte.pt.table = 1;
+ pte.pt.xn = 0;
+ xen_pgtable[zeroeth_table_offset(XEN_VIRT_START)] = pte;
+
+ p = (void *) xen_first;
+#else
+ p = (void *) cpu0_pgtable;
+#endif
+
+ /* Map xen second level page-table */
+ p[0] = pte_of_xenaddr((uintptr_t)(xen_second));
+ p[0].pt.table = 1;
+ p[0].pt.xn = 0;
+
+ /* Break up the Xen mapping into pages and protect them separately. */
+ for ( i = 0; i < XEN_NR_ENTRIES(3); i++ )
+ {
+ vaddr_t va = XEN_VIRT_START + (i << PAGE_SHIFT);
+
+ if ( !is_kernel(va) )
+ break;
+ pte = pte_of_xenaddr(va);
+ pte.pt.table = 1; /* third level mappings always have this bit set */
+ if ( is_kernel_text(va) || is_kernel_inittext(va) )
+ {
+ pte.pt.xn = 0;
+ pte.pt.ro = 1;
+ }
+ if ( is_kernel_rodata(va) )
+ pte.pt.ro = 1;
+ xen_xenmap[i] = pte;
+ }
+
+ /* Initialise xen second level entries ... */
+ /* ... Xen's text etc */
+ for ( i = 0; i < XEN_NR_ENTRIES(2); i++ )
+ {
+ vaddr_t va = XEN_VIRT_START + (i << XEN_PT_LEVEL_SHIFT(2));
+
+ pte = pte_of_xenaddr((vaddr_t)(xen_xenmap + i * XEN_PT_LPAE_ENTRIES));
+ pte.pt.table = 1;
+ xen_second[second_table_offset(va)] = pte;
+ }
+
+ /* ... Fixmap */
+ pte = pte_of_xenaddr((vaddr_t)xen_fixmap);
+ pte.pt.table = 1;
+ xen_second[second_table_offset(FIXMAP_ADDR(0))] = pte;
+
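+ /* switch_ttbr() takes the machine address of the new root page table. */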
+#ifdef CONFIG_ARM_64
+ ttbr = (uintptr_t) xen_pgtable + phys_offset;
+#else
+ ttbr = (uintptr_t) cpu0_pgtable + phys_offset;
+#endif
+
+ switch_ttbr(ttbr);
+
+ xen_pt_enforce_wnx();
+
+#ifdef CONFIG_ARM_32
+ per_cpu(xen_pgtable, 0) = cpu0_pgtable;
+#endif
+}
+
+void *__init arch_vmap_virt_end(void)
+{
+ return (void *)(VMAP_VIRT_START + VMAP_VIRT_SIZE);
+}
+
+/* Release all __init and __initdata ranges to be reused */
+void free_init_memory(void)
+{
+ paddr_t pa = virt_to_maddr(__init_begin);
+ unsigned long len = __init_end - __init_begin;
+ uint32_t insn;
+ unsigned int i, nr = len / sizeof(insn);
+ uint32_t *p;
+ int rc;
+
+ rc = modify_xen_mappings((unsigned long)__init_begin,
+ (unsigned long)__init_end, PAGE_HYPERVISOR_RW);
+ if ( rc )
+ panic("Unable to map RW the init section (rc = %d)\n", rc);
+
+ /*
+ * From now on, init will not be used for execution anymore,
+ * so nuke the instruction cache to remove entries related to init.
+ */
+ invalidate_icache_local();
+
+#ifdef CONFIG_ARM_32
+ /* udf instruction, i.e. permanently undefined (see A8.8.247 in ARM DDI 0406C.c) */
+ insn = 0xe7f000f0;
+#else
+ insn = AARCH64_BREAK_FAULT;
+#endif
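+ /* Fill the init section with faulting instructions to catch stray execution. */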
+ p = (uint32_t *)__init_begin;
+ for ( i = 0; i < nr; i++ )
+ *(p + i) = insn;
+
+ rc = destroy_xen_mappings((unsigned long)__init_begin,
+ (unsigned long)__init_end);
+ if ( rc )
+ panic("Unable to remove the init section (rc = %d)\n", rc);
+
+ init_domheap_pages(pa, pa + len);
+ printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */