@@ -2212,6 +2212,10 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
pg[i].count_info |= PGC_xen_heap;
ret = page_to_virt(pg);
+ /*
+  * The direct map is no longer guaranteed to be fully populated.
+  * Map this region on demand so only live allocations are reachable
+  * through the direct map.
+  */
map_pages_to_xen((unsigned long)ret, page_to_mfn(pg),
1UL << order, PAGE_HYPERVISOR);
@@ -2234,6 +2238,7 @@ void free_xenheap_pages(void *v, unsigned int order)
pg[i].count_info &= ~PGC_xen_heap;
ASSERT((unsigned long)v >= DIRECTMAP_VIRT_START);
+ /* Remove the 1:1 mapping of this region so freed pages are no longer reachable via the direct map. */
map_pages_to_xen((unsigned long)v, INVALID_MFN, 1UL << order, _PAGE_NONE);
free_heap_pages(pg, order, true);