@@ -832,20 +832,14 @@ static int setup_cpu_root_pgt(unsigned int cpu)
         goto out;
     }
 
-    /*
-     * Unfortunately, some code (especially in assembly) assumes the rpt is in
-     * the DIRECTMAP region and is always mapped. Making all of them adapt to
-     * the new page table APIs is non-trivial. For now, make it always mapped
-     * on the xenheap.
-     */
-    rpt = alloc_xenheap_page();
-    if ( !rpt )
+    rpt_mfn = alloc_xen_pagetable();
+    if ( mfn_eq(rpt_mfn, INVALID_MFN) )
     {
         rc = -ENOMEM;
         goto out;
     }
 
-    rpt_mfn = _mfn(virt_to_mfn(rpt));
+    rpt = map_xen_pagetable(rpt_mfn);
     clear_page(rpt);
     per_cpu(root_pgt_mfn, cpu) = rpt_mfn;
 
@@ -890,6 +884,7 @@ static int setup_cpu_root_pgt(unsigned int cpu)
         rc = clone_mapping((void *)per_cpu(stubs.addr, cpu), rpt);
 
  out:
+    UNMAP_XEN_PAGETABLE(rpt);
     return rc;
 }
 
@@ -905,7 +900,7 @@ static void cleanup_cpu_root_pgt(unsigned int cpu)
 
     per_cpu(root_pgt_mfn, cpu) = INVALID_MFN;
 
-    rpt = mfn_to_virt(mfn_x(rpt_mfn));
+    rpt = map_xen_pagetable(rpt_mfn);
 
     for ( r = root_table_offset(DIRECTMAP_VIRT_START);
           r < root_table_offset(HYPERVISOR_VIRT_END); ++r )
@@ -950,8 +945,8 @@ static void cleanup_cpu_root_pgt(unsigned int cpu)
         free_xen_pagetable(l3t_mfn);
     }
 
-    /* Unlike other levels, the root level is a xenheap page. */
-    free_xenheap_page(rpt);
+    UNMAP_XEN_PAGETABLE(rpt);
+    free_xen_pagetable(rpt_mfn);
 
     /* Also zap the stub mapping for this CPU. */
     if ( stub_linear )