@@ -1,7 +1,6 @@
config ARM_32
def_bool y
depends on "$(ARCH)" = "arm32"
- select ARCH_MAP_DOMAIN_PAGE
config ARM_64
def_bool y
@@ -12,6 +11,7 @@ config ARM_64
config ARM
def_bool y
select HAS_ALTERNATIVE
+ select ARCH_MAP_DOMAIN_PAGE
select HAS_DEVICE_TREE
select HAS_PASSTHROUGH
select HAS_UBSAN
@@ -4,6 +4,7 @@
#include <xen/mm.h>
#include <xen/pfn.h>
+#include <asm/domain_page.h>
#include <asm/setup.h>
#include <asm/static-memory.h>
@@ -236,6 +237,14 @@ void __init setup_mm(void)
setup_frametable_mappings(ram_start, ram_end);
max_page = PFN_DOWN(ram_end);
+ /*
+ * The allocators may need to use map_domain_page() (such as for
+ * scrubbing pages). So we need to prepare the domheap area first.
+ */
+ if ( !init_domheap_mappings(smp_processor_id()) )
+ panic("CPU%u: Unable to prepare the domheap page-tables\n",
+ smp_processor_id());
+
init_staticmem_pages();
}
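
The ordering matters: once the heap receives its pages, the allocators may scrub them, and without a directmap entry the only generic way to reach an arbitrary MFN is map_domain_page(). A minimal sketch of that dependency, in the spirit of Xen's clear_domain_page() helper (the function name below is illustrative, not part of the patch):

    /*
     * Illustrative only: how page scrubbing ends up relying on the
     * mapcache. If init_domheap_mappings() had not run yet, the
     * map_domain_page() call below would have no per-CPU domheap
     * page-tables to hook the temporary mapping into.
     */
    static void scrub_one_page_sketch(mfn_t mfn)
    {
        void *p = map_domain_page(mfn); /* needs the domheap area ready */

        clear_page(p);                  /* stand-in for the scrub pattern */
        unmap_domain_page(p);
    }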
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <xen/domain_page.h>
#include <xen/mm.h>
#include <xen/pmap.h>
#include <xen/vmap.h>
@@ -8,6 +9,8 @@
/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+#undef mfn_to_virt
+#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
/* cpu0's domheap page tables */
static DEFINE_PAGE_TABLES(cpu0_dommap, DOMHEAP_SECOND_PAGES);
@@ -31,13 +34,30 @@ bool init_domheap_mappings(unsigned int cpu)
{
unsigned int order = get_order_from_pages(DOMHEAP_SECOND_PAGES);
lpae_t *root = per_cpu(xen_pgtable, cpu);
+ lpae_t *first;
unsigned int i, first_idx;
lpae_t *domheap;
mfn_t mfn;
+ /* Convenience aliases */
+ DECLARE_OFFSETS(offsets, DOMHEAP_VIRT_START);
+
ASSERT(root);
ASSERT(!per_cpu(xen_dommap, cpu));
+ /*
+ * On Arm64, the root is at level 0. Therefore we need an extra step
+ * to allocate the first level page-table.
+ */
+#ifdef CONFIG_ARM_64
+ if ( create_xen_table(&root[offsets[0]]) )
+ return false;
+
+ first = xen_map_table(lpae_get_mfn(root[offsets[0]]));
+#else
+ first = root;
+#endif
+
/*
* The domheap for cpu0 is initialized before the heap is initialized.
* So we need to use pre-allocated pages.
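
For reference, DECLARE_OFFSETS() provides the per-level table indices of a virtual address, so offsets[0] is the level-0 (zeroeth) slot and offsets[1] the level-1 (first) slot used below. A minimal sketch of that index calculation, assuming Xen's usual 4KB-granule LPAE layout (512-entry tables, ZEROETH_SHIFT/FIRST_SHIFT/SECOND_SHIFT/THIRD_SHIFT of 39/30/21/12):

    /*
     * Illustrative only: every LPAE level consumes 9 bits of the
     * virtual address, since a 4KB table holds 512 entries.
     */
    static inline unsigned int lpae_index_sketch(vaddr_t va, unsigned int shift)
    {
        return (va >> shift) & (LPAE_ENTRIES - 1);
    }

    /* offsets[0] == lpae_index_sketch(DOMHEAP_VIRT_START, ZEROETH_SHIFT), etc. */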
@@ -58,16 +78,20 @@ bool init_domheap_mappings(unsigned int cpu)
* domheap mapping pages.
*/
mfn = virt_to_mfn(domheap);
- first_idx = first_table_offset(DOMHEAP_VIRT_START);
+ first_idx = offsets[1];
for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
{
lpae_t pte = mfn_to_xen_entry(mfn_add(mfn, i), MT_NORMAL);
pte.pt.table = 1;
- write_pte(&root[first_idx + i], pte);
+ write_pte(&first[first_idx + i], pte);
}
per_cpu(xen_dommap, cpu) = domheap;
+#ifdef CONFIG_ARM_64
+ xen_unmap_table(first);
+#endif
+
return true;
}
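
The loop above installs the DOMHEAP_SECOND_PAGES pre-allocated pages as second-level tables in consecutive first-level entries starting at offsets[1], so entry i of the per-CPU xen_dommap array ends up backing a fixed 2MB window of the domheap. A sketch of that slot-to-address relationship (the helper name is illustrative, assuming SECOND_SHIFT is 21):

    /* Illustrative only: which virtual range domheap slot 'slot' serves. */
    static inline vaddr_t domheap_slot_to_va(unsigned int slot)
    {
        ASSERT(slot < DOMHEAP_ENTRIES);

        return DOMHEAP_VIRT_START + ((vaddr_t)slot << SECOND_SHIFT);
    }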
@@ -91,6 +115,10 @@ void *map_domain_page(mfn_t mfn)
lpae_t pte;
int i, slot;
+ /* Bypass the mapcache if the page is in the directmap */
+ if ( arch_mfns_in_directmap(mfn_x(mfn), 1) )
+ return mfn_to_virt(mfn);
+
local_irq_save(flags);
/* The map is laid out as an open-addressed hash table where each
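
The new fast path skips the per-CPU mapcache entirely whenever the MFN is already covered by the 1:1 directmap. A sketch of the containment test arch_mfns_in_directmap() boils down to; the directmap_mfn_start/directmap_mfn_end names are used here for illustration and may not match the real arm64 implementation:

    /* Assumed globals tracking the MFN range backed by the directmap. */
    extern mfn_t directmap_mfn_start, directmap_mfn_end;

    /* Illustrative only: is [smfn, smfn + nr) fully inside the directmap? */
    static bool mfns_in_directmap_sketch(unsigned long smfn, unsigned long nr)
    {
        unsigned long emfn = smfn + nr;

        return smfn >= mfn_x(directmap_mfn_start) &&
               emfn <= mfn_x(directmap_mfn_end);
    }

If the test passes, mfn_to_virt() already yields a valid pointer and no mapcache slot needs to be consumed or flushed.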
@@ -153,13 +181,25 @@ void *map_domain_page(mfn_t mfn)
/* Release a mapping taken with map_domain_page() */
void unmap_domain_page(const void *ptr)
{
+ unsigned long va = (unsigned long)ptr;
unsigned long flags;
lpae_t *map = this_cpu(xen_dommap);
- int slot = ((unsigned long)ptr - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
+ unsigned int slot;
+
+    /*
+     * The range check below doubles as the NULL check: it relies on
+     * the domheap area not starting at address 0.
+     */
+ BUILD_BUG_ON(DOMHEAP_VIRT_START == 0);
- if ( !ptr )
+ /*
+ * map_domain_page() may not have mapped anything if the address
+ * is part of the directmap. So ignore anything outside of the
+ * domheap.
+ */
+ if ( (va < DOMHEAP_VIRT_START) ||
+ ((va - DOMHEAP_VIRT_START) >= DOMHEAP_VIRT_SIZE) )
return;
+ slot = (va - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
+
local_irq_save(flags);
ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
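
With the directmap bypass in place, callers never need to know which path their pointer came from: a domheap virtual address is looked up and torn down here, while a directmap virtual address falls outside the range check and is simply ignored. A usage sketch (the helper name is illustrative):

    /* Illustrative only: works for directmap and mapcache pages alike. */
    static void copy_page_from_mfn_sketch(void *dst, mfn_t mfn)
    {
        const void *src = map_domain_page(mfn); /* directmap VA or domheap slot */

        memcpy(dst, src, PAGE_SIZE);
        unmap_domain_page(src);                 /* no-op for a directmap VA */
    }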
@@ -432,6 +432,11 @@ static inline void page_set_xenheap_gfn(struct page_info *p, gfn_t gfn)
} while ( (y = cmpxchg(&p->u.inuse.type_info, x, nx)) != x );
}
+/* Helpers to allocate, map and unmap a Xen page-table */
+int create_xen_table(lpae_t *entry);
+lpae_t *xen_map_table(mfn_t mfn);
+void xen_unmap_table(const lpae_t *table);
+
#endif /* __ARCH_ARM_MM__ */
/*
* Local variables:
@@ -35,9 +35,13 @@
*
* 32G - 64G Frametable: 56 bytes per page for 2TB of RAM
*
- * 0x00000a8000000000 - 0x00007fffffffffff (512GB+117TB, L0 slots [21..255])
+ * 0x00000a8000000000 - 0x00007f7fffffffff (117TB, L0 slots [21..254])
* Unused
*
+ * 0x00007f8000000000 - 0x00007fffffffffff (512GB, L0 slot [255])
+ * (Relative offsets)
+ * 0 - 2G Domheap: on-demand-mapped
+ *
* 0x0000800000000000 - 0x000084ffffffffff (5TB, L0 slots [256..265])
* 1:1 mapping of RAM
*
@@ -130,6 +134,13 @@
#define FRAMETABLE_SIZE GB(32)
#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table))
+#define DOMHEAP_VIRT_START SLOT0(255)
+#define DOMHEAP_VIRT_SIZE GB(2)
+
+#define DOMHEAP_ENTRIES 1024 /* 1024 2MB mapping slots */
+/* Number of domheap pagetable pages required at the second level (2MB mappings) */
+#define DOMHEAP_SECOND_PAGES (DOMHEAP_VIRT_SIZE >> FIRST_SHIFT)
+
#define DIRECTMAP_VIRT_START SLOT0(256)
#define DIRECTMAP_SIZE (SLOT0_ENTRY_SIZE * (266 - 256))
#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE - 1)
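
A quick way to check the new constants are mutually consistent: one 4KB second-level table maps 512 entries of 2MB, i.e. 1GB, so the 2GB window needs two such tables and offers 1024 mapping slots, and slot 255 (starting at 255 << 39 = 0x00007f8000000000) ends before the directmap at slot 256. An illustrative compile-time sketch of those relations, assuming Xen's usual SECOND_SHIFT (21) and LPAE_ENTRIES (512):

    /* Illustrative only: sanity checks tying the domheap constants together. */
    static inline void domheap_layout_checks(void)
    {
        /* 2GB of domheap split into 2MB slots -> 1024 entries. */
        BUILD_BUG_ON(DOMHEAP_ENTRIES != (DOMHEAP_VIRT_SIZE >> SECOND_SHIFT));
        /* Each second-level page covers 512 * 2MB = 1GB -> 2 pages for 2GB. */
        BUILD_BUG_ON(DOMHEAP_SECOND_PAGES * LPAE_ENTRIES != DOMHEAP_ENTRIES);
        /* The 2GB window stays inside L0 slot 255, below the directmap. */
        BUILD_BUG_ON(DOMHEAP_VIRT_START + DOMHEAP_VIRT_SIZE > DIRECTMAP_VIRT_START);
    }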
@@ -32,7 +32,7 @@ mm_printk(const char *fmt, ...) {}
#define HYP_PT_ROOT_LEVEL 1
#endif
-static lpae_t *xen_map_table(mfn_t mfn)
+lpae_t *xen_map_table(mfn_t mfn)
{
/*
* During early boot, map_domain_page() may be unusable. Use the
@@ -44,7 +44,7 @@ static lpae_t *xen_map_table(mfn_t mfn)
return map_domain_page(mfn);
}
-static void xen_unmap_table(const lpae_t *table)
+void xen_unmap_table(const lpae_t *table)
{
/*
* During early boot, xen_map_table() will not use map_domain_page()
@@ -227,7 +227,7 @@ void *ioremap(paddr_t pa, size_t len)
return ioremap_attr(pa, len, PAGE_HYPERVISOR_NOCACHE);
}
-static int create_xen_table(lpae_t *entry)
+int create_xen_table(lpae_t *entry)
{
mfn_t mfn;
void *p;