@@ -240,7 +240,9 @@ void __init discard_initial_images(void)
         uint64_t start = (uint64_t)initial_images[i].mod_start << PAGE_SHIFT;
 
         init_domheap_pages(start,
-                           start + PAGE_ALIGN(initial_images[i].mod_end));
+                           start + PAGE_ALIGN(initial_images[i].mod_end));
+        map_pages_to_xen((unsigned long)__va(start), INVALID_MFN,
+                         PFN_DOWN(PAGE_ALIGN(initial_images[i].mod_end)), _PAGE_NONE);
     }
 
     nr_initial_images = 0;
@@ -1346,6 +1348,8 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
         /* Pass mapped memory to allocator /before/ creating new mappings. */
         init_boot_pages(s, min(map_s, e));
+        map_pages_to_xen((unsigned long)__va(s), INVALID_MFN,
+                         PFN_DOWN(min(map_s, e) - s), _PAGE_NONE);
         s = map_s;
         if ( s < map_e )
         {
@@ -1354,6 +1358,8 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             map_s = (s + mask) & ~mask;
             map_e &= ~mask;
             init_boot_pages(map_s, map_e);
+            map_pages_to_xen((unsigned long)__va(map_s), INVALID_MFN,
+                             PFN_DOWN(map_e - map_s), _PAGE_NONE);
         }
 
         if ( map_s > map_e )
@@ -1367,9 +1373,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
             if ( map_e < end )
             {
+                init_boot_pages(map_e, end);
                 map_pages_to_xen((unsigned long)__va(map_e), INVALID_MFN,
                                  PFN_DOWN(end - map_e), _PAGE_NONE);
-                init_boot_pages(map_e, end);
                 map_e = end;
             }
         }
@@ -1382,9 +1388,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
         }
         if ( s < map_s )
         {
+            init_boot_pages(s, map_s);
             map_pages_to_xen((unsigned long)__va(s), INVALID_MFN,
                              PFN_DOWN(map_s - s), _PAGE_NONE);
-            init_boot_pages(s, map_s);
         }
     }
 
@@ -1506,6 +1512,8 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             if ( PFN_DOWN(s) <= limit )
                 s = pfn_to_paddr(limit + 1);
             init_domheap_pages(s, e);
+            map_pages_to_xen((unsigned long)__va(s), INVALID_MFN,
+                             PFN_DOWN(e - s), _PAGE_NONE);
         }
     }
     else