@@ -1,6 +1,7 @@
config ARM_32
def_bool y
depends on "$(ARCH)" = "arm32"
+ select DOMAIN_PAGE
config ARM_64
def_bool y
@@ -17,6 +17,7 @@ obj-y += device.o
obj-$(CONFIG_IOREQ_SERVER) += dm.o
obj-y += domain.o
obj-y += domain_build.init.o
+obj-$(CONFIG_DOMAIN_PAGE) += domain_page.o
obj-y += domctl.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-y += efi/
new file mode 100644
@@ -0,0 +1,193 @@
+#include <xen/mm.h>
+#include <xen/pmap.h>
+#include <xen/vmap.h>
+
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef virt_to_mfn
+#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+
+/*
+ * cpu0's domheap page tables. Statically allocated because cpu0's
+ * domheap is set up before the heap allocator is ready (see
+ * init_domheap_mappings()); secondary CPUs allocate theirs from the
+ * xenheap instead.
+ */
+static DEFINE_PAGE_TABLES(cpu0_dommap, DOMHEAP_SECOND_PAGES);
+
+/*
+ * xen_dommap == pages used by map_domain_page, these pages contain
+ * the second level pagetables which map the domheap region
+ * starting at DOMHEAP_VIRT_START in 2MB chunks.
+ */
+static DEFINE_PER_CPU(lpae_t *, xen_dommap);
+
+/*
+ * Prepare the area that will be used to map domheap pages. They are
+ * mapped in 2MB chunks, so we need to allocate the page-tables up to
+ * the 2nd level.
+ *
+ * The caller should make sure the root page-table for @cpu has
+ * been allocated.
+ *
+ * Returns true on success, false if the second-level tables could
+ * not be allocated.
+ */
+bool init_domheap_mappings(unsigned int cpu)
+{
+ unsigned int order = get_order_from_pages(DOMHEAP_SECOND_PAGES);
+ lpae_t *root = per_cpu(xen_pgtable, cpu);
+ unsigned int i, first_idx;
+ lpae_t *domheap;
+ mfn_t mfn;
+
+ ASSERT(root);
+ ASSERT(!per_cpu(xen_dommap, cpu));
+
+ /*
+ * The domheap for cpu0 is set up before the heap is initialized. So we
+ * need to use pre-allocated pages.
+ */
+ if ( !cpu )
+ domheap = cpu0_dommap;
+ else
+ domheap = alloc_xenheap_pages(order, 0);
+
+ if ( !domheap )
+ return false;
+
+ /* Ensure the domheap has no stray mappings */
+ memset(domheap, 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE);
+
+ /*
+ * Update the first level mapping to reference the local CPUs
+ * domheap mapping pages.
+ */
+ mfn = virt_to_mfn(domheap);
+ first_idx = first_table_offset(DOMHEAP_VIRT_START);
+ for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
+ {
+ lpae_t pte = mfn_to_xen_entry(mfn_add(mfn, i), MT_NORMAL);
+ /* First-level entries point to tables, not 2MB blocks */
+ pte.pt.table = 1;
+ write_pte(&root[first_idx + i], pte);
+ }
+
+ per_cpu(xen_dommap, cpu) = domheap;
+
+ return true;
+}
+
+/*
+ * Map a page so it is visible on every CPU: global mappings live in
+ * the vmap area rather than in the per-cpu domheap slots.
+ */
+void *map_domain_page_global(mfn_t mfn)
+{
+ return vmap(&mfn, 1);
+}
+
+/* Release a mapping taken with map_domain_page_global() */
+void unmap_domain_page_global(const void *va)
+{
+ vunmap(va);
+}
+
+/* Map a page of domheap memory */
+void *map_domain_page(mfn_t mfn)
+{
+ unsigned long flags;
+ lpae_t *map = this_cpu(xen_dommap);
+ unsigned long slot_mfn = mfn_x(mfn) & ~XEN_PT_LPAE_ENTRY_MASK;
+ vaddr_t va;
+ lpae_t pte;
+ int i, slot;
+
+ /* The per-cpu dommap may also be used from interrupt context */
+ local_irq_save(flags);
+
+ /*
+ * The map is laid out as an open-addressed hash table where each
+ * entry is a 2MB superpage pte. We use the available bits of each
+ * PTE as a reference count; when the refcount is zero the slot can
+ * be reused.
+ */
+ for ( slot = (slot_mfn >> XEN_PT_LPAE_SHIFT) % DOMHEAP_ENTRIES, i = 0;
+ i < DOMHEAP_ENTRIES;
+ slot = (slot + 1) % DOMHEAP_ENTRIES, i++ )
+ {
+ if ( map[slot].pt.avail < 0xf &&
+ map[slot].pt.base == slot_mfn &&
+ map[slot].pt.valid )
+ {
+ /* This slot already points to the right place; reuse it */
+ map[slot].pt.avail++;
+ break;
+ }
+ else if ( map[slot].pt.avail == 0 )
+ {
+ /* Commandeer this 2MB slot */
+ pte = mfn_to_xen_entry(_mfn(slot_mfn), MT_NORMAL);
+ pte.pt.avail = 1;
+ write_pte(map + slot, pte);
+ break;
+ }
+
+ }
+ /* If the map fills up, the callers have misbehaved. */
+ BUG_ON(i == DOMHEAP_ENTRIES);
+
+#ifndef NDEBUG
+ /*
+ * Searching the hash could get slow if the map starts filling up.
+ * Cross that bridge when we come to it.
+ */
+ {
+ static int max_tries = 32;
+ if ( i >= max_tries )
+ {
+ dprintk(XENLOG_WARNING, "Domheap map is filling: %i tries\n", i);
+ max_tries *= 2;
+ }
+ }
+#endif
+
+ local_irq_restore(flags);
+
+ /* va = slot base + offset of the 4K page within the 2MB superpage */
+ va = (DOMHEAP_VIRT_START
+ + (slot << SECOND_SHIFT)
+ + ((mfn_x(mfn) & XEN_PT_LPAE_ENTRY_MASK) << THIRD_SHIFT));
+
+ /*
+ * We may not have flushed this specific subpage at map time,
+ * since we only flush the 4k page not the superpage
+ */
+ flush_xen_tlb_range_va_local(va, PAGE_SIZE);
+
+ return (void *)va;
+}
+
+/* Release a mapping taken with map_domain_page() */
+void unmap_domain_page(const void *va)
+{
+ unsigned long flags;
+ lpae_t *map = this_cpu(xen_dommap);
+ /* Harmless arithmetic even for va == NULL; checked just below */
+ int slot = ((unsigned long) va - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
+
+ if ( !va )
+ return;
+
+ local_irq_save(flags);
+
+ ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
+ ASSERT(map[slot].pt.avail != 0);
+
+ /* Drop one reference; the slot becomes reusable at refcount zero */
+ map[slot].pt.avail--;
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Translate a mapping obtained from map_domain_page() or
+ * map_domain_page_global() back to the MFN it maps.
+ */
+mfn_t domain_page_map_to_mfn(const void *ptr)
+{
+ unsigned long va = (unsigned long)ptr;
+ lpae_t *map = this_cpu(xen_dommap);
+ int slot = (va - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
+ unsigned long offset = (va >> THIRD_SHIFT) & XEN_PT_LPAE_ENTRY_MASK;
+
+ /*
+ * Global mappings live in the vmap area, not in the per-cpu
+ * domheap slots. Compute the offset as va - VMAP_VIRT_START
+ * (the reverse subtraction underflows for any va above the start
+ * and would wrongly fail the range check).
+ */
+ if ( (va >= VMAP_VIRT_START) && ((va - VMAP_VIRT_START) < VMAP_VIRT_SIZE) )
+ return virt_to_mfn(va);
+
+ ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
+ ASSERT(map[slot].pt.avail != 0);
+
+ return mfn_add(lpae_get_mfn(map[slot]), offset);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -1,6 +1,12 @@
#ifndef __ARM_ARM32_MM_H__
#define __ARM_ARM32_MM_H__
+#include <xen/percpu.h>
+
+#include <asm/lpae.h>
+
+DECLARE_PER_CPU(lpae_t *, xen_pgtable);
+
/*
* Only a limited amount of RAM, called xenheap, is always mapped on ARM32.
* For convenience always return false.
@@ -122,7 +122,6 @@
#ifdef CONFIG_ARM_32
-#define CONFIG_DOMAIN_PAGE 1
#define CONFIG_SEPARATE_XENHEAP 1
#define FRAMETABLE_VIRT_START _AT(vaddr_t,0x02000000)
@@ -261,6 +261,23 @@ lpae_t mfn_to_xen_entry(mfn_t mfn, unsigned int attr);
#define third_table_offset(va) TABLE_OFFSET(third_linear_offset(va))
#define zeroeth_table_offset(va) TABLE_OFFSET(zeroeth_linear_offset(va))
+/*
+ * Macros to define page-tables:
+ * - DEFINE_BOOT_PAGE_TABLE is used to define a page-table that is used
+ * in assembly code before BSS is zeroed.
+ * - DEFINE_PAGE_TABLE{,S} are used to define one or multiple
+ * page-tables to be used after BSS is zeroed (typically they are only used
+ * in C).
+ */
+#define DEFINE_BOOT_PAGE_TABLE(name) \
+lpae_t __aligned(PAGE_SIZE) __section(".data.page_aligned") \
+ name[XEN_PT_LPAE_ENTRIES]
+
+#define DEFINE_PAGE_TABLES(name, nr) \
+lpae_t __aligned(PAGE_SIZE) name[XEN_PT_LPAE_ENTRIES * (nr)]
+
+#define DEFINE_PAGE_TABLE(name) DEFINE_PAGE_TABLES(name, 1)
+
#endif /* __ARM_LPAE_H__ */
/*
@@ -57,23 +57,6 @@ mm_printk(const char *fmt, ...) {}
} while (0)
#endif
-/*
- * Macros to define page-tables:
- * - DEFINE_BOOT_PAGE_TABLE is used to define page-table that are used
- * in assembly code before BSS is zeroed.
- * - DEFINE_PAGE_TABLE{,S} are used to define one or multiple
- * page-tables to be used after BSS is zeroed (typically they are only used
- * in C).
- */
-#define DEFINE_BOOT_PAGE_TABLE(name) \
-lpae_t __aligned(PAGE_SIZE) __section(".data.page_aligned") \
- name[XEN_PT_LPAE_ENTRIES]
-
-#define DEFINE_PAGE_TABLES(name, nr) \
-lpae_t __aligned(PAGE_SIZE) name[XEN_PT_LPAE_ENTRIES * (nr)]
-
-#define DEFINE_PAGE_TABLE(name) DEFINE_PAGE_TABLES(name, 1)
-
/* Static start-of-day pagetables that we use before the allocators
* are up. These are used by all CPUs during bringup before switching
* to the CPUs own pagetables.
@@ -110,7 +93,7 @@ DEFINE_BOOT_PAGE_TABLE(boot_third);
/* Main runtime page tables */
/*
- * For arm32 xen_pgtable and xen_dommap are per-PCPU and are allocated before
+ * For arm32 xen_pgtable is per-PCPU and is allocated before
* bringing up each CPU. For arm64 xen_pgtable is common to all PCPUs.
*
* xen_second, xen_fixmap and xen_xenmap are always shared between all
@@ -126,18 +109,10 @@ static DEFINE_PAGE_TABLE(xen_first);
#define HYP_PT_ROOT_LEVEL 1
/* Per-CPU pagetable pages */
/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
-static DEFINE_PER_CPU(lpae_t *, xen_pgtable);
+DEFINE_PER_CPU(lpae_t *, xen_pgtable);
#define THIS_CPU_PGTABLE this_cpu(xen_pgtable)
-/*
- * xen_dommap == pages used by map_domain_page, these pages contain
- * the second level pagetables which map the domheap region
- * starting at DOMHEAP_VIRT_START in 2MB chunks.
- */
-static DEFINE_PER_CPU(lpae_t *, xen_dommap);
/* Root of the trie for cpu0, other CPU's PTs are dynamically allocated */
static DEFINE_PAGE_TABLE(cpu0_pgtable);
-/* cpu0's domheap page tables */
-static DEFINE_PAGE_TABLES(cpu0_dommap, DOMHEAP_SECOND_PAGES);
#endif
/* Common pagetable leaves */
@@ -371,175 +346,6 @@ void clear_fixmap(unsigned map)
BUG_ON(res != 0);
}
-#ifdef CONFIG_DOMAIN_PAGE
-/*
- * Prepare the area that will be used to map domheap pages. They are
- * mapped in 2MB chunks, so we need to allocate the page-tables up to
- * the 2nd level.
- *
- * The caller should make sure the root page-table for @cpu has been
- * been allocated.
- */
-bool init_domheap_mappings(unsigned int cpu)
-{
- unsigned int order = get_order_from_pages(DOMHEAP_SECOND_PAGES);
- lpae_t *root = per_cpu(xen_pgtable, cpu);
- unsigned int i, first_idx;
- lpae_t *domheap;
- mfn_t mfn;
-
- ASSERT(root);
- ASSERT(!per_cpu(xen_dommap, cpu));
-
- /*
- * The domheap for cpu0 is before the heap is initialized. So we
- * need to use pre-allocated pages.
- */
- if ( !cpu )
- domheap = cpu0_dommap;
- else
- domheap = alloc_xenheap_pages(order, 0);
-
- if ( !domheap )
- return false;
-
- /* Ensure the domheap has no stray mappings */
- memset(domheap, 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE);
-
- /*
- * Update the first level mapping to reference the local CPUs
- * domheap mapping pages.
- */
- mfn = virt_to_mfn(domheap);
- first_idx = first_table_offset(DOMHEAP_VIRT_START);
- for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
- {
- lpae_t pte = mfn_to_xen_entry(mfn_add(mfn, i), MT_NORMAL);
- pte.pt.table = 1;
- write_pte(&root[first_idx + i], pte);
- }
-
- per_cpu(xen_dommap, cpu) = domheap;
-
- return true;
-}
-
-void *map_domain_page_global(mfn_t mfn)
-{
- return vmap(&mfn, 1);
-}
-
-void unmap_domain_page_global(const void *va)
-{
- vunmap(va);
-}
-
-/* Map a page of domheap memory */
-void *map_domain_page(mfn_t mfn)
-{
- unsigned long flags;
- lpae_t *map = this_cpu(xen_dommap);
- unsigned long slot_mfn = mfn_x(mfn) & ~XEN_PT_LPAE_ENTRY_MASK;
- vaddr_t va;
- lpae_t pte;
- int i, slot;
-
- local_irq_save(flags);
-
- /* The map is laid out as an open-addressed hash table where each
- * entry is a 2MB superpage pte. We use the available bits of each
- * PTE as a reference count; when the refcount is zero the slot can
- * be reused. */
- for ( slot = (slot_mfn >> XEN_PT_LPAE_SHIFT) % DOMHEAP_ENTRIES, i = 0;
- i < DOMHEAP_ENTRIES;
- slot = (slot + 1) % DOMHEAP_ENTRIES, i++ )
- {
- if ( map[slot].pt.avail < 0xf &&
- map[slot].pt.base == slot_mfn &&
- map[slot].pt.valid )
- {
- /* This slot already points to the right place; reuse it */
- map[slot].pt.avail++;
- break;
- }
- else if ( map[slot].pt.avail == 0 )
- {
- /* Commandeer this 2MB slot */
- pte = mfn_to_xen_entry(_mfn(slot_mfn), MT_NORMAL);
- pte.pt.avail = 1;
- write_pte(map + slot, pte);
- break;
- }
-
- }
- /* If the map fills up, the callers have misbehaved. */
- BUG_ON(i == DOMHEAP_ENTRIES);
-
-#ifndef NDEBUG
- /* Searching the hash could get slow if the map starts filling up.
- * Cross that bridge when we come to it */
- {
- static int max_tries = 32;
- if ( i >= max_tries )
- {
- dprintk(XENLOG_WARNING, "Domheap map is filling: %i tries\n", i);
- max_tries *= 2;
- }
- }
-#endif
-
- local_irq_restore(flags);
-
- va = (DOMHEAP_VIRT_START
- + (slot << SECOND_SHIFT)
- + ((mfn_x(mfn) & XEN_PT_LPAE_ENTRY_MASK) << THIRD_SHIFT));
-
- /*
- * We may not have flushed this specific subpage at map time,
- * since we only flush the 4k page not the superpage
- */
- flush_xen_tlb_range_va_local(va, PAGE_SIZE);
-
- return (void *)va;
-}
-
-/* Release a mapping taken with map_domain_page() */
-void unmap_domain_page(const void *va)
-{
- unsigned long flags;
- lpae_t *map = this_cpu(xen_dommap);
- int slot = ((unsigned long) va - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
-
- if ( !va )
- return;
-
- local_irq_save(flags);
-
- ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
- ASSERT(map[slot].pt.avail != 0);
-
- map[slot].pt.avail--;
-
- local_irq_restore(flags);
-}
-
-mfn_t domain_page_map_to_mfn(const void *ptr)
-{
- unsigned long va = (unsigned long)ptr;
- lpae_t *map = this_cpu(xen_dommap);
- int slot = (va - DOMHEAP_VIRT_START) >> SECOND_SHIFT;
- unsigned long offset = (va>>THIRD_SHIFT) & XEN_PT_LPAE_ENTRY_MASK;
-
- if ( (va >= VMAP_VIRT_START) && ((VMAP_VIRT_START - va) < VMAP_VIRT_SIZE) )
- return virt_to_mfn(va);
-
- ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
- ASSERT(map[slot].pt.avail != 0);
-
- return mfn_add(lpae_get_mfn(map[slot]), offset);
-}
-#endif
-
void flush_page_to_ram(unsigned long mfn, bool sync_icache)
{
void *v = map_domain_page(_mfn(mfn));
@@ -10,6 +10,7 @@ config X86
select ALTERNATIVE_CALL
select ARCH_SUPPORTS_INT128
select CORE_PARKING
+ select DOMAIN_PAGE
select HAS_ALTERNATIVE
select HAS_COMPAT
select HAS_CPUFREQ
@@ -22,7 +22,6 @@
#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
#define CONFIG_DISCONTIGMEM 1
#define CONFIG_NUMA_EMU 1
-#define CONFIG_DOMAIN_PAGE 1
#define CONFIG_PAGEALLOC_MAX_ORDER (2 * PAGETABLE_ORDER)
#define CONFIG_DOMU_MAX_ORDER PAGETABLE_ORDER
@@ -11,6 +11,9 @@ config COMPAT
config CORE_PARKING
bool
+config DOMAIN_PAGE
+ bool
+
config GRANT_TABLE
bool "Grant table support" if EXPERT
default y