@@ -52,6 +52,12 @@ bool efi_enabled(unsigned int feature)
void __init efi_init_memory(void) { }
+bool efi_boot_mem_unused(unsigned long *start, unsigned long *end)
+{
+ *start = *end = (unsigned long)_end; /* stub: report an empty range at the image end */
+ return false; /* no EFI boot allocator here, hence never any unused space */
+}
+
void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t l4e) { }
bool efi_rs_using_pgtables(void)
@@ -830,6 +830,7 @@ void __init noreturn __start_xen(unsigne
module_t *mod;
unsigned long nr_pages, raw_max_page, modules_headroom, module_map[1];
int i, j, e820_warn = 0, bytes = 0;
+ unsigned long eb_start, eb_end;
bool acpi_boot_table_init_done = false, relocated = false;
int ret;
struct ns16550_defaults ns16550 = {
@@ -1145,7 +1146,8 @@ void __init noreturn __start_xen(unsigne
/*
* This needs to remain in sync with xen_in_range() and the
- * respective reserve_e820_ram() invocation below.
+ * respective reserve_e820_ram() invocation below. No need to
+ * query efi_boot_mem_unused() here, though.
*/
mod[mbi->mods_count].mod_start = virt_to_mfn(_stext);
mod[mbi->mods_count].mod_end = __2M_rwdata_end - _stext;
@@ -1417,8 +1419,18 @@ void __init noreturn __start_xen(unsigne
if ( !xen_phys_start )
panic("Not enough memory to relocate Xen\n");
+ /* FIXME: Putting a hole in .bss would shatter the large page mapping. */
+ if ( using_2M_mapping() )
+ efi_boot_mem_unused(NULL, NULL);
+
/* This needs to remain in sync with xen_in_range(). */
- reserve_e820_ram(&boot_e820, __pa(_stext), __pa(__2M_rwdata_end));
+ if ( efi_boot_mem_unused(&eb_start, &eb_end) )
+ {
+ reserve_e820_ram(&boot_e820, __pa(_stext), __pa(eb_start));
+ reserve_e820_ram(&boot_e820, __pa(eb_end), __pa(__2M_rwdata_end));
+ }
+ else
+ reserve_e820_ram(&boot_e820, __pa(_stext), __pa(__2M_rwdata_end));
/* Late kexec reservation (dynamic start address). */
kexec_reserve_area(&boot_e820);
@@ -1979,7 +1991,7 @@ int __hwdom_init xen_in_range(unsigned l
paddr_t start, end;
int i;
- enum { region_s3, region_ro, region_rw, nr_regions };
+ enum { region_s3, region_ro, region_rw, region_bss, nr_regions };
static struct {
paddr_t s, e;
} xen_regions[nr_regions] __hwdom_initdata;
@@ -2004,6 +2016,14 @@ int __hwdom_init xen_in_range(unsigned l
/* hypervisor .data + .bss */
xen_regions[region_rw].s = __pa(&__2M_rwdata_start);
xen_regions[region_rw].e = __pa(&__2M_rwdata_end);
+ if ( efi_boot_mem_unused(&start, &end) )
+ {
+ ASSERT(__pa(start) >= xen_regions[region_rw].s);
+ ASSERT(__pa(end) <= xen_regions[region_rw].e);
+ xen_regions[region_rw].e = __pa(start);
+ xen_regions[region_bss].s = __pa(end);
+ xen_regions[region_bss].e = __pa(&__2M_rwdata_end);
+ }
}
start = (paddr_t)mfn << PAGE_SHIFT;
@@ -1,3 +1,4 @@
+#include <xen/efi.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/lib.h>
@@ -364,6 +365,8 @@ void tboot_shutdown(uint32_t shutdown_ty
/* if this is S3 then set regions to MAC */
if ( shutdown_type == TB_SHUTDOWN_S3 )
{
+ unsigned long s, e;
+
/*
* Xen regions for tboot to MAC. This needs to remain in sync with
* xen_in_range().
@@ -378,6 +381,15 @@ void tboot_shutdown(uint32_t shutdown_ty
/* hypervisor .data + .bss */
g_tboot_shared->mac_regions[2].start = (uint64_t)__pa(&__2M_rwdata_start);
g_tboot_shared->mac_regions[2].size = __2M_rwdata_end - __2M_rwdata_start;
+ if ( efi_boot_mem_unused(&s, &e) )
+ {
+ g_tboot_shared->mac_regions[2].size =
+ s - (unsigned long)__2M_rwdata_start;
+ g_tboot_shared->mac_regions[3].start = __pa(e);
+ g_tboot_shared->mac_regions[3].size =
+ (unsigned long)__2M_rwdata_end - e;
+ g_tboot_shared->num_mac_regions = 4;
+ }
/*
* MAC domains and other Xen memory
@@ -1,5 +1,6 @@
#include "efi.h"
#include <xen/init.h>
+#include <xen/mm.h>
#ifdef CONFIG_ARM
/*
@@ -21,7 +22,7 @@
static char __section(".bss.page_aligned") __aligned(PAGE_SIZE)
ebmalloc_mem[EBMALLOC_SIZE];
-static unsigned long __initdata ebmalloc_allocated;
+static unsigned long __read_mostly ebmalloc_allocated; /* also read after init */
/* EFI boot allocator. */
void __init *ebmalloc(size_t size)
@@ -36,17 +37,37 @@ void __init *ebmalloc(size_t size)
return ptr;
}
+bool efi_boot_mem_unused(unsigned long *start, unsigned long *end)
+{
+ if ( !start && !end ) /* NULL/NULL: mark the whole area as in use */
+ {
+ ebmalloc_allocated = sizeof(ebmalloc_mem); /* suppresses any later freeing */
+ return false;
+ }
+
+ *start = (unsigned long)ebmalloc_mem + PAGE_ALIGN(ebmalloc_allocated);
+ *end = (unsigned long)ebmalloc_mem + sizeof(ebmalloc_mem);
+
+ return *start < *end; /* true iff an unused, page-granular tail remains */
+}
+
void __init free_ebmalloc_unused_mem(void)
{
-#if 0 /* FIXME: Putting a hole in the BSS breaks the IOMMU mappings for dom0. */
unsigned long start, end;
- start = (unsigned long)ebmalloc_mem + PAGE_ALIGN(ebmalloc_allocated);
- end = (unsigned long)ebmalloc_mem + sizeof(ebmalloc_mem);
+ if ( !efi_boot_mem_unused(&start, &end) )
+ return; /* nothing left unused (or deliberately marked all-used) */
destroy_xen_mappings(start, end);
+
+#ifdef CONFIG_X86
+ /*
+ * Reserving the space early in the E820 map means it was already freed
+ * long before we make it here. Don't free the range a second time.
+ */
+#else
init_xenheap_pages(__pa(start), __pa(end));
+#endif
printk(XENLOG_INFO "Freed %lukB unused BSS memory\n", (end - start) >> 10);
-#endif
}
@@ -33,6 +33,7 @@ struct compat_pf_efi_runtime_call;
bool efi_enabled(unsigned int feature);
void efi_init_memory(void);
+bool efi_boot_mem_unused(unsigned long *start, unsigned long *end);
bool efi_rs_using_pgtables(void);
unsigned long efi_get_time(void);
void efi_halt_system(void);