@@ -799,7 +799,7 @@ that enabling this option cannot guarantee anything beyond what underlying
hardware guarantees (with, where available and known to Xen, respective
tweaks applied).
-### directmap (x86)
+### directmap (arm64, x86)
> `= <boolean>`
> Default: `true`
@@ -7,6 +7,7 @@ config ARM_64
depends on !ARM_32
select 64BIT
select HAS_FAST_MULTIPLY
+ select HAS_SECRET_HIDING
config ARM
def_bool y
@@ -158,16 +158,27 @@ void __init switch_ttbr(uint64_t ttbr)
update_identity_mapping(false);
}
-/* Map the region in the directmap area. */
+/*
+ * This either populates a valid direct map, or allocates empty L1 tables
+ * and creates the L0 entries for the given region in the direct map
+ * depending on has_directmap().
+ *
+ * When directmap=no, we still need to populate empty L1 tables in the
+ * directmap region. The reason is that the root page-table (i.e. L0)
+ * is per-CPU and secondary CPUs will initialize their root page-table
+ * based on the pCPU0 one. So L0 entries will be shared if they are
+ * pre-populated. We also rely on the fact that L1 tables are never
+ * freed.
+ */
static void __init setup_directmap_mappings(unsigned long base_mfn,
unsigned long nr_mfns)
{
+ unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
int rc;
/* First call sets the directmap physical and virtual offset. */
if ( mfn_eq(directmap_mfn_start, INVALID_MFN) )
{
- unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
directmap_mfn_start = _mfn(base_mfn);
directmap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
@@ -188,6 +199,24 @@ static void __init setup_directmap_mappings(unsigned long base_mfn,
panic("cannot add directmap mapping at %lx below heap start %lx\n",
base_mfn, mfn_x(directmap_mfn_start));
+ if ( !has_directmap() )
+ {
+ vaddr_t vaddr = (vaddr_t)__mfn_to_virt(base_mfn);
+ lpae_t *root = this_cpu(xen_pgtable);
+ unsigned int i, slot;
+
+ slot = first_table_offset(vaddr);
+ nr_mfns += base_mfn - mfn_gb;
+ for ( i = 0; i < nr_mfns; i += BIT(XEN_PT_LEVEL_ORDER(0), UL), slot++ )
+ {
+ lpae_t *entry = &root[slot];
+
+ if ( !lpae_is_valid(*entry) && !create_xen_table(entry) )
+ panic("Unable to populate zeroeth slot %u\n", slot);
+ }
+ return;
+ }
+
rc = map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn),
_mfn(base_mfn), nr_mfns,
PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
@@ -3,13 +3,10 @@
extern DEFINE_PAGE_TABLE(xen_pgtable);
-/*
- * On ARM64, all the RAM is currently direct mapped in Xen.
- * Hence return always true.
- */
+/* On Arm64, the user can choose whether all the RAM is mapped in the directmap. */
static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
{
- return true;
+ return has_directmap();
}
void arch_setup_page_tables(void);
@@ -12,6 +12,7 @@
#include <xen/grant_table.h>
#include <xen/guest_access.h>
#include <xen/mm.h>
+#include <xen/param.h>
#include <xsm/xsm.h>
@@ -778,6 +778,7 @@ void asmlinkage __init start_xen(unsigned long boot_phys_offset,
cmdline_parse(cmdline);
setup_mm();
+ printk("Booting with directmap %s\n", has_directmap() ? "on" : "off");
vm_init();