@@ -832,14 +832,20 @@ static int setup_cpu_root_pgt(unsigned int cpu)
goto out;
}
- rpt_mfn = alloc_xen_pagetable();
- if ( mfn_eq(rpt_mfn, INVALID_MFN) )
+ /*
+ * Unfortunately, some code (especially in assembly) assumes the rpt is in
+ * the DIRECTMAP region and is always mapped. Making all of it adapt to
+ * the new page table APIs is non-trivial. For now, make it always mapped
+ * on the xenheap.
+ */
+ rpt = alloc_xenheap_page();
+ if ( !rpt )
{
rc = -ENOMEM;
goto out;
}
- rpt = map_xen_pagetable(rpt_mfn);
+ rpt_mfn = _mfn(virt_to_mfn(rpt));
clear_page(rpt);
per_cpu(root_pgt_mfn, cpu) = rpt_mfn;
@@ -884,7 +890,6 @@ static int setup_cpu_root_pgt(unsigned int cpu)
rc = clone_mapping((void *)per_cpu(stubs.addr, cpu), rpt);
out:
- UNMAP_XEN_PAGETABLE(rpt);
return rc;
}
@@ -900,7 +905,7 @@ static void cleanup_cpu_root_pgt(unsigned int cpu)
per_cpu(root_pgt_mfn, cpu) = INVALID_MFN;
- rpt = map_xen_pagetable(rpt_mfn);
+ rpt = mfn_to_virt(mfn_x(rpt_mfn));
for ( r = root_table_offset(DIRECTMAP_VIRT_START);
r < root_table_offset(HYPERVISOR_VIRT_END); ++r )
@@ -945,8 +950,8 @@ static void cleanup_cpu_root_pgt(unsigned int cpu)
free_xen_pagetable(l3t_mfn);
}
- UNMAP_XEN_PAGETABLE(rpt);
- free_xen_pagetable(rpt_mfn);
+ /* Unlike other levels, the root level is a xenheap page. */
+ free_xenheap_page(rpt);
/* Also zap the stub mapping for this CPU. */
if ( stub_linear )