@@ -10,6 +10,7 @@ obj-y += entry.o
obj-y += head.o
obj-y += insn.o
obj-$(CONFIG_LIVEPATCH) += livepatch.o
+obj-y += mm.o
obj-y += smc.o
obj-y += smpboot.o
obj-y += traps.o
new file mode 100644
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <xen/init.h>
+#include <xen/mm.h>
+
+#include <asm/setup.h>
+
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef virt_to_mfn
+#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+
+static DEFINE_PAGE_TABLE(xen_first_id);
+static DEFINE_PAGE_TABLE(xen_second_id);
+static DEFINE_PAGE_TABLE(xen_third_id);
+
+/*
+ * The identity mapping may start at physical address 0. So we don't want
+ * to keep it mapped longer than necessary.
+ *
+ * When this is called, we are still using the boot_pgtable.
+ *
+ * We need to prepare the identity mapping for both the boot page tables
+ * and runtime page tables.
+ *
+ * The logic to create the entry is slightly different because Xen may
+ * be running at a different location at runtime.
+ */
+static void __init prepare_boot_identity_mapping(void)
+{
+ paddr_t id_addr = virt_to_maddr(_start);
+ lpae_t pte;
+ DECLARE_OFFSETS(id_offsets, id_addr);
+
+ if ( id_offsets[0] != 0 )
+ panic("Cannot handle ID mapping above 512GB\n");
+
+ /* Link first ID table */
+ pte = mfn_to_xen_entry(virt_to_mfn(boot_first_id), MT_NORMAL);
+ pte.pt.table = 1;
+ pte.pt.xn = 0;
+
+ write_pte(&boot_pgtable[id_offsets[0]], pte);
+
+ /* Link second ID table */
+ pte = mfn_to_xen_entry(virt_to_mfn(boot_second_id), MT_NORMAL);
+ pte.pt.table = 1;
+ pte.pt.xn = 0;
+
+ write_pte(&boot_first_id[id_offsets[1]], pte);
+
+ /* Link third ID table */
+ pte = mfn_to_xen_entry(virt_to_mfn(boot_third_id), MT_NORMAL);
+ pte.pt.table = 1;
+ pte.pt.xn = 0;
+
+ write_pte(&boot_second_id[id_offsets[2]], pte);
+
+ /* The mapping in the third table will be created at a later stage */
+}
+
+static void __init prepare_runtime_identity_mapping(void)
+{
+ paddr_t id_addr = virt_to_maddr(_start);
+ lpae_t pte;
+ DECLARE_OFFSETS(id_offsets, id_addr);
+
+ if ( id_offsets[0] != 0 )
+ panic("Cannot handle ID mapping above 512GB\n");
+
+ /* Link first ID table */
+ pte = pte_of_xenaddr((vaddr_t)xen_first_id);
+ pte.pt.table = 1;
+ pte.pt.xn = 0;
+
+ write_pte(&xen_pgtable[id_offsets[0]], pte);
+
+ /* Link second ID table */
+ pte = pte_of_xenaddr((vaddr_t)xen_second_id);
+ pte.pt.table = 1;
+ pte.pt.xn = 0;
+
+ write_pte(&xen_first_id[id_offsets[1]], pte);
+
+ /* Link third ID table */
+ pte = pte_of_xenaddr((vaddr_t)xen_third_id);
+ pte.pt.table = 1;
+ pte.pt.xn = 0;
+
+ write_pte(&xen_second_id[id_offsets[2]], pte);
+
+ /* The mapping in the third table will be created at a later stage */
+}
+
+void __init arch_setup_page_tables(void)
+{
+ prepare_boot_identity_mapping();
+ prepare_runtime_identity_mapping();
+}
+
+void update_identity_mapping(bool enable)
+{
+ paddr_t id_addr = virt_to_maddr(_start);
+ int rc;
+
+ if ( enable )
+ rc = map_pages_to_xen(id_addr, maddr_to_mfn(id_addr), 1,
+ PAGE_HYPERVISOR_RX);
+ else
+ rc = destroy_xen_mappings(id_addr, id_addr + PAGE_SIZE);
+
+ BUG_ON(rc);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -18,6 +18,10 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
bool init_domheap_mappings(unsigned int cpu);
+static inline void arch_setup_page_tables(void)
+{
+}
+
#endif /* __ARM_ARM32_MM_H__ */
/*
@@ -1,6 +1,8 @@
#ifndef __ARM_ARM64_MM_H__
#define __ARM_ARM64_MM_H__
+extern DEFINE_PAGE_TABLE(xen_pgtable);
+
/*
* On ARM64, all the RAM is currently direct mapped in Xen.
* Hence return always true.
@@ -10,6 +12,16 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
return true;
}
+void arch_setup_page_tables(void);
+
+/*
+ * Enable/disable the identity mapping
+ *
+ * Note that nested calls (e.g. enable=true, enable=true) are not
+ * supported.
+ */
+void update_identity_mapping(bool enable);
+
#endif /* __ARM_ARM64_MM_H__ */
/*
@@ -168,6 +168,17 @@ int map_range_to_domain(const struct dt_device_node *dev,
extern const char __ro_after_init_start[], __ro_after_init_end[];
+extern DEFINE_BOOT_PAGE_TABLE(boot_pgtable);
+
+#ifdef CONFIG_ARM_64
+extern DEFINE_BOOT_PAGE_TABLE(boot_first_id);
+#endif
+extern DEFINE_BOOT_PAGE_TABLE(boot_second_id);
+extern DEFINE_BOOT_PAGE_TABLE(boot_third_id);
+
+/* Find where Xen will be residing at runtime and return a PT entry */
+lpae_t pte_of_xenaddr(vaddr_t);
+
#endif
/*
* Local variables:
@@ -102,7 +102,7 @@ DEFINE_BOOT_PAGE_TABLE(boot_third);
#ifdef CONFIG_ARM_64
#define HYP_PT_ROOT_LEVEL 0
-static DEFINE_PAGE_TABLE(xen_pgtable);
+DEFINE_PAGE_TABLE(xen_pgtable);
static DEFINE_PAGE_TABLE(xen_first);
#define THIS_CPU_PGTABLE xen_pgtable
#else
@@ -397,7 +397,7 @@ void flush_page_to_ram(unsigned long mfn, bool sync_icache)
invalidate_icache();
}
-static inline lpae_t pte_of_xenaddr(vaddr_t va)
+lpae_t pte_of_xenaddr(vaddr_t va)
{
paddr_t ma = va + phys_offset;
@@ -504,6 +504,8 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
phys_offset = boot_phys_offset;
+ arch_setup_page_tables();
+
#ifdef CONFIG_ARM_64
pte = pte_of_xenaddr((uintptr_t)xen_first);
pte.pt.table = 1;