--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -228,53 +228,6 @@ void __init init_frametable(void)
memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg);
}

-#ifndef NDEBUG
-static unsigned int __read_mostly root_pgt_pv_xen_slots
- = ROOT_PAGETABLE_PV_XEN_SLOTS;
-static l4_pgentry_t __read_mostly split_l4e;
-#else
-#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
-#endif
-
-static void pv_arch_init_memory(void)
-{
-#ifndef NDEBUG
- unsigned int i;
-
- if ( highmem_start )
- {
- unsigned long split_va = (unsigned long)__va(highmem_start);
-
- if ( split_va < HYPERVISOR_VIRT_END &&
- split_va - 1 == (unsigned long)__va(highmem_start - 1) )
- {
- root_pgt_pv_xen_slots = l4_table_offset(split_va) -
- ROOT_PAGETABLE_FIRST_XEN_SLOT;
- ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
- if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
- {
- l3_pgentry_t *l3tab = alloc_xen_pagetable();
-
- if ( l3tab )
- {
- const l3_pgentry_t *l3idle =
- l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
-
- for ( i = 0; i < l3_table_offset(split_va); ++i )
- l3tab[i] = l3idle[i];
- for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
- l3tab[i] = l3e_empty();
- split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
- __PAGE_HYPERVISOR_RW);
- }
- else
- ++root_pgt_pv_xen_slots;
- }
- }
- }
-#endif
-}
-
void __init arch_init_memory(void)
{
unsigned long i, pfn, rstart_pfn, rend_pfn, iostart_pfn, ioend_pfn;
@@ -1433,26 +1386,6 @@ static int alloc_l3_table(struct page_info *page)
return rc > 0 ? 0 : rc;
}

-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
- bool zap_ro_mpt)
-{
- /* Xen private mappings. */
- memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
-#ifndef NDEBUG
- if ( l4e_get_intpte(split_l4e) )
- l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots] =
- split_l4e;
-#endif
- l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
- l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
- if ( zap_ro_mpt || is_pv_32bit_domain(d) )
- l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
-}
-
bool fill_ro_mpt(unsigned long mfn)
{
l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
@@ -1527,7 +1460,7 @@ static int alloc_l4_table(struct page_info *page)

if ( rc >= 0 )
{
- init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+ pv_init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
atomic_inc(&d->arch.pv_domain.nr_l4_pages);
rc = 0;
}
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -18,6 +18,7 @@
#include <asm/bzimage.h>
#include <asm/dom0_build.h>
#include <asm/page.h>
+#include <asm/pv/mm.h>
#include <asm/setup.h>

/* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
@@ -588,7 +589,7 @@ int __init dom0_construct_pv(struct domain *d,
l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
}
clear_page(l4tab);
- init_guest_l4_table(l4tab, d, 0);
+ pv_init_guest_l4_table(l4tab, d, 0);
v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
if ( is_pv_32bit_domain(d) )
v->arch.guest_table_user = v->arch.guest_table;
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -10,6 +10,7 @@
#include <xen/sched.h>

#include <asm/pv/domain.h>
+#include <asm/pv/mm.h>

static void noreturn continue_nonidle_domain(struct vcpu *v)
{
@@ -29,7 +30,7 @@ static int setup_compat_l4(struct vcpu *v)

l4tab = __map_domain_page(pg);
clear_page(l4tab);
- init_guest_l4_table(l4tab, v->domain, 1);
+ pv_init_guest_l4_table(l4tab, v->domain, 1);
unmap_domain_page(l4tab);

/* This page needs to look like a pagetable so that it can be shadowed */
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -23,6 +23,7 @@
#include <xen/guest_access.h>

#include <asm/pv/mm.h>
+#include <asm/setup.h>

/*
* PTE updates can be done with ordinary writes except:
@@ -32,6 +33,14 @@
#define PTE_UPDATE_WITH_CMPXCHG
#endif
+#ifndef NDEBUG
+static unsigned int __read_mostly root_pgt_pv_xen_slots
+ = ROOT_PAGETABLE_PV_XEN_SLOTS;
+static l4_pgentry_t __read_mostly split_l4e;
+#else
+#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
+#endif
+
/* Read a PV guest's l1e that maps this virtual address. */
void pv_get_guest_eff_l1e(unsigned long addr, l1_pgentry_t *eff_l1e)
{
@@ -96,6 +105,65 @@ void pv_unmap_guest_l1e(void *p)
unmap_domain_page(p);
}
+void pv_init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
+ bool zap_ro_mpt)
+{
+ /* Xen private mappings. */
+ memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+ &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+ root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+#ifndef NDEBUG
+ if ( l4e_get_intpte(split_l4e) )
+ l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots] =
+ split_l4e;
+#endif
+ l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
+ l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
+ l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
+ l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
+ if ( zap_ro_mpt || is_pv_32bit_domain(d) )
+ l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+}
+
+void pv_arch_init_memory(void)
+{
+#ifndef NDEBUG
+ unsigned int i;
+
+ if ( highmem_start )
+ {
+ unsigned long split_va = (unsigned long)__va(highmem_start);
+
+ if ( split_va < HYPERVISOR_VIRT_END &&
+ split_va - 1 == (unsigned long)__va(highmem_start - 1) )
+ {
+ root_pgt_pv_xen_slots = l4_table_offset(split_va) -
+ ROOT_PAGETABLE_FIRST_XEN_SLOT;
+ ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
+ if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
+ {
+ l3_pgentry_t *l3tab = alloc_xen_pagetable();
+
+ if ( l3tab )
+ {
+ const l3_pgentry_t *l3idle =
+ l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
+
+ for ( i = 0; i < l3_table_offset(split_va); ++i )
+ l3tab[i] = l3idle[i];
+ for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
+ l3tab[i] = l3e_empty();
+ split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
+ __PAGE_HYPERVISOR_RW);
+ }
+ else
+ ++root_pgt_pv_xen_slots;
+ }
+ }
+ }
+#endif
+}
+
/*
* How to write an entry to the guest pagetables.
* Returns false for failure (pointer not valid), true for success.
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -305,8 +305,6 @@ static inline void *__page_to_virt(const struct page_info *pg)
int free_page_type(struct page_info *page, unsigned long type,
int preemptible);

-void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
- bool_t zap_ro_mpt);
bool_t fill_ro_mpt(unsigned long mfn);
void zap_ro_mpt(unsigned long mfn);

--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -89,6 +89,10 @@ bool pv_update_intpte(intpte_t *p, intpte_t old, intpte_t new,
l1_pgentry_t *pv_map_guest_l1e(unsigned long addr, unsigned long *gl1mfn);
void pv_unmap_guest_l1e(void *p);

+void pv_init_guest_l4_table(l4_pgentry_t[], const struct domain *,
+ bool zap_ro_mpt);
+void pv_arch_init_memory(void);
+
#else

static inline void pv_get_guest_eff_l1e(unsigned long addr,
@@ -110,6 +114,10 @@ static inline l1_pgentry_t *pv_map_guest_l1e(unsigned long addr,

static inline void pv_unmap_guest_l1e(void *p) {}

+static inline void pv_init_guest_l4_table(l4_pgentry_t[],
+ const struct domain *,
+ bool zap_ro_mpt) {}
+static inline void pv_arch_init_memory(void) {}
#endif

#endif /* __X86_PV_MM_H__ */
Move two functions to pv/mm.c. Add the pv_ prefix to init_guest_l4_table.
Export them via pv/mm.h. Fix up call sites.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/mm.c            | 69 +-------------------------------------------
 xen/arch/x86/pv/dom0_build.c |  3 +-
 xen/arch/x86/pv/domain.c     |  3 +-
 xen/arch/x86/pv/mm.c         | 68 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/mm.h     |  2 --
 xen/include/asm-x86/pv/mm.h  |  8 +++++
 6 files changed, 81 insertions(+), 72 deletions(-)
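For reference, the L4 slot arithmetic that the moved pv_arch_init_memory()
performs in its debug (!NDEBUG) path can be illustrated with a standalone
sketch. This is not Xen code: the constants below mirror the usual x86-64
4-level paging values and the conventional ROOT_PAGETABLE_FIRST_XEN_SLOT
value of 256, and the sample split address is hypothetical.

/*
 * Standalone sketch (not Xen code) of the L4 slot arithmetic used by
 * pv_arch_init_memory() above.  Constants follow standard x86-64
 * 4-level paging; the example address is hypothetical.
 */
#include <stdio.h>

#define L4_PAGETABLE_SHIFT              39
#define L4_PAGETABLE_ENTRIES            512
#define ROOT_PAGETABLE_FIRST_XEN_SLOT   256  /* first Xen-private L4 slot */

static unsigned int l4_table_offset(unsigned long va)
{
    return (va >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1);
}

int main(void)
{
    /* Hypothetical __va(highmem_start): half an L4 slot past slot 262. */
    unsigned long split_va = (262UL << L4_PAGETABLE_SHIFT) |
                             (1UL << (L4_PAGETABLE_SHIFT - 1));

    /*
     * Slots strictly below the one containing split_va can be copied
     * verbatim from idle_pg_table; this is the shrunken
     * root_pgt_pv_xen_slots value.
     */
    unsigned int slots = l4_table_offset(split_va) -
                         ROOT_PAGETABLE_FIRST_XEN_SLOT;

    /*
     * If split_va is not L4-aligned, it shares an L4 entry with the
     * address just below it, so that entry needs the partial L3 table
     * (split_l4e) built by the function above.
     */
    int needs_split = l4_table_offset(split_va) ==
                      l4_table_offset(split_va - 1);

    printf("slot=%u pv_xen_slots=%u needs_split_l3=%d\n",
           l4_table_offset(split_va), slots, needs_split);
    return 0;
}

With these values the sketch prints "slot=262 pv_xen_slots=6
needs_split_l3=1": six whole L4 slots are copied from the idle page table
by pv_init_guest_l4_table(), and the seventh gets the partial L3 installed
via split_l4e.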