@@ -856,6 +856,57 @@ static struct domain *__init create_dom0(const module_t *image,
/* How much of the directmap is prebuilt at compile time. */
 #define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)
 
+/*
+ * Depending on arch_has_directmap(), this either populates a valid direct
+ * map, or allocates empty L3 tables and creates the L4 entries for the
+ * virtual addresses between [pstart, pend) in the direct map.
+ *
+ * When directmap=no, we still need to populate empty L3 tables in the
+ * direct map region. The reason is that on-demand xenheap mappings are
+ * created in the idle domain's page table but must be seen by
+ * everyone. Since all domains share the direct map L4 entries, they
+ * will share xenheap mappings if we pre-populate the L4 entries and L3
+ * tables in the direct map region for all RAM. We also rely on the fact
+ * that L3 tables are never freed.
+ */
+static void __init populate_directmap(uint64_t pstart, uint64_t pend,
+                                       unsigned int flags)
+{
+    unsigned long vstart = (unsigned long)__va(pstart);
+    unsigned long vend = (unsigned long)__va(pend);
+
+    if ( pstart >= pend )
+        return;
+
+    BUG_ON(vstart < DIRECTMAP_VIRT_START);
+    BUG_ON(vend > DIRECTMAP_VIRT_END);
+
+    if ( arch_has_directmap() )
+        /* Populate valid direct map. */
+        BUG_ON(map_pages_to_xen(vstart, maddr_to_mfn(pstart),
+                                PFN_DOWN(pend - pstart), flags));
+    else
+    {
+        /* Create empty L3 tables. */
+        unsigned long vaddr = vstart & ~((1UL << L4_PAGETABLE_SHIFT) - 1);
+
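+        /*
+         * Each L4 slot covers 1UL << L4_PAGETABLE_SHIFT (512GiB) of virtual
+         * address space; a present slot points to a single L3 table.  Walk
+         * the range in L4-slot strides.
+         */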
+        for ( ; vaddr < vend; vaddr += (1UL << L4_PAGETABLE_SHIFT) )
+        {
+            l4_pgentry_t *pl4e = &idle_pg_table[l4_table_offset(vaddr)];
+
+            if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
+            {
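+                /* Allocate and zero a fresh L3 table, then hook it up. */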
+                mfn_t mfn = alloc_boot_pages(1, 1);
+                void *v = map_domain_page(mfn);
+
+                clear_page(v);
+                UNMAP_DOMAIN_PAGE(v);
+                l4e_write(pl4e, l4e_from_mfn(mfn, __PAGE_HYPERVISOR));
+            }
+        }
+    }
+}
+
void __init noreturn __start_xen(unsigned long mbi_p)
{
     char *memmap_type = NULL;
@@ -1507,8 +1558,17 @@ void __init noreturn __start_xen(unsigned long mbi_p)
         map_e = min_t(uint64_t, e,
                       ARRAY_SIZE(l2_directmap) << L2_PAGETABLE_SHIFT);
 
-        /* Pass mapped memory to allocator /before/ creating new mappings. */
+        /*
+         * Pass mapped memory to allocator /before/ creating new mappings.
+         * The direct map for the bottom 4GiB has been populated in the first
+         * e820 pass. In the second pass, we make sure those existing mappings
+         * are destroyed when directmap=no.
+         */
         init_boot_pages(s, min(map_s, e));
+        if ( !arch_has_directmap() )
+            destroy_xen_mappings((unsigned long)__va(s),
+                                 (unsigned long)__va(min(map_s, e)));
+
         s = map_s;
         if ( s < map_e )
         {
@@ -1517,6 +1577,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             map_s = (s + mask) & ~mask;
             map_e &= ~mask;
             init_boot_pages(map_s, map_e);
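+            /*
+             * As above, tear down the mappings created for this (now
+             * superpage-aligned) chunk in the first pass when directmap=no.
+             */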
+            if ( !arch_has_directmap() )
+                destroy_xen_mappings((unsigned long)__va(map_s),
+                                     (unsigned long)__va(map_e));
         }
 
         if ( map_s > map_e )
@@ -1530,8 +1593,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
         /* Create new mappings /before/ passing memory to the allocator. */
         if ( map_e < end )
         {
-            map_pages_to_xen((unsigned long)__va(map_e), maddr_to_mfn(map_e),
-                             PFN_DOWN(end - map_e), PAGE_HYPERVISOR);
+            populate_directmap(map_e, end, PAGE_HYPERVISOR);
             init_boot_pages(map_e, end);
             map_e = end;
         }
@@ -1540,13 +1602,11 @@ void __init noreturn __start_xen(unsigned long mbi_p)
         {
             /* This range must not be passed to the boot allocator and
              * must also not be mapped with _PAGE_GLOBAL. */
-            map_pages_to_xen((unsigned long)__va(map_e), maddr_to_mfn(map_e),
-                             PFN_DOWN(e - map_e), __PAGE_HYPERVISOR_RW);
+            populate_directmap(map_e, e, __PAGE_HYPERVISOR_RW);
         }
         if ( s < map_s )
         {
-            map_pages_to_xen((unsigned long)__va(s), maddr_to_mfn(s),
-                             PFN_DOWN(map_s - s), PAGE_HYPERVISOR);
+            populate_directmap(s, map_s, PAGE_HYPERVISOR);
             init_boot_pages(s, map_s);
         }
     }