Message ID | 450fcebd6ad4cbf0b078070149b9287260bdc0a3.1569489002.git.hongyax@amazon.com (mailing list archive) |
---|---|
State | New, archived |
Series | Remove direct map from Xen |
On Thu, Sep 26, 2019 at 10:46:40AM +0100, hongyax@amazon.com wrote:
> From: Hongyan Xia <hongyax@amazon.com>
>
> Before, it assumed both cr3 could be accessed via a direct map. This is
> no longer true. Also, this means we can remove a xenheap mapping hack
> we introduced earlier when building the cr3 of dom0.
>
> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
> ---
>  xen/arch/x86/pv/dom0_build.c | 11 +++++------
>  xen/arch/x86/x86_64/entry.S  | 32 +++++++++++++++++++++++++++++---
>  2 files changed, 34 insertions(+), 9 deletions(-)
>
> diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
> index 0ec30988b8..202edcaa17 100644
> --- a/xen/arch/x86/pv/dom0_build.c
> +++ b/xen/arch/x86/pv/dom0_build.c
> @@ -623,9 +623,7 @@ int __init dom0_construct_pv(struct domain *d,
>      if ( !is_pv_32bit_domain(d) )
>      {
>          maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
> -        l4start = l4tab = __va(mpt_alloc);
> -        map_pages_to_xen((unsigned long)l4start, maddr_to_mfn(mpt_alloc), 1,
> -                         PAGE_HYPERVISOR);
> +        l4start = l4tab = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
>          mpt_alloc += PAGE_SIZE;
>          clear_page(l4tab);
>          init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
> @@ -635,9 +633,8 @@ int __init dom0_construct_pv(struct domain *d,
>      else
>      {
>          /* Monitor table already created by switch_compat(). */
> -        l4start = l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
> -        map_pages_to_xen((unsigned long)l4start,
> -                         pagetable_get_mfn(v->arch.guest_table), 1, PAGE_HYPERVISOR);
> +        l4start = l4tab =
> +            map_xen_pagetable(pagetable_get_mfn(v->arch.guest_table));
>          /* See public/xen.h on why the following is needed. */
>          maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
>          l3start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
> @@ -907,6 +904,8 @@ int __init dom0_construct_pv(struct domain *d,
>          pv_shim_setup_dom(d, l4start, v_start, vxenstore_start, vconsole_start,
>                            vphysmap_start, si);
>
> +    UNMAP_XEN_PAGETABLE(l4start);
> +

These hunks should be part of a previous patch, right? The one in which
you changed PV Dom0 construction.

>      if ( is_pv_32bit_domain(d) )
>          xlat_start_info(si, pv_shim ? XLAT_start_info_console_domU
>                                      : XLAT_start_info_console_dom0);
> diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
> index 11385857fa..8ca9a8e0ea 100644
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -150,11 +150,27 @@ restore_all_guest:
>          je    .Lrag_copy_done
>          movb  $0, STACK_CPUINFO_FIELD(root_pgt_changed)(%rdx)
>          movabs $PADDR_MASK & PAGE_MASK, %rsi
> -        movabs $DIRECTMAP_VIRT_START, %rcx
>          and   %rsi, %rdi
>          and   %r9, %rsi
> -        add   %rcx, %rdi
> -        add   %rcx, %rsi
> +
> +        /* Without a direct map, we have to map pages first before copying. */
> +        /* FIXME: optimisations may be needed. */
> +        pushq %r9
> +        pushq %rdx
> +        pushq %rax
> +        pushq %rsi
> +        shr   $PAGE_SHIFT, %rdi
> +        callq map_xen_pagetable
> +        popq  %rdi
> +        pushq %rax
> +        shr   $PAGE_SHIFT, %rdi
> +        callq map_xen_pagetable
> +        mov   %rax, %rsi
> +        mov   0(%rsp), %rdi
> +
> +        /* %rsi and %rdi are on top of the stack for unmapping. */
> +        pushq %rsi
> +
>          mov   $ROOT_PAGETABLE_FIRST_XEN_SLOT, %ecx
>          mov   root_table_offset(SH_LINEAR_PT_VIRT_START)*8(%rsi), %r8
>          mov   %r8, root_table_offset(SH_LINEAR_PT_VIRT_START)*8(%rdi)
> @@ -166,6 +182,16 @@ restore_all_guest:
>          sub   $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
>                  ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rdi
>          rep movsq
> +
> +        /* Unmap the two pages. */
> +        popq  %rdi
> +        callq unmap_xen_pagetable
> +        popq  %rdi
> +        callq unmap_xen_pagetable
> +        popq  %rax
> +        popq  %rdx
> +        popq  %r9
> +

This section is for synchronising root page tables. Now that it has
become so long, it would be better if you wrote a C function for this
purpose.

Wei.

>  .Lrag_copy_done:
>          mov   %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
>          movb  $1, STACK_CPUINFO_FIELD(use_pv_cr3)(%rdx)
> --
> 2.17.1
>
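For reference, a minimal sketch of what such a C helper could look like,
assuming the map_xen_pagetable()/unmap_xen_pagetable() primitives introduced
earlier in this series. The function name sync_guest_root_pgt() and its
signature (taking the two root page table MFNs) are hypothetical and not part
of the posted patch; the register saving and the cr3-to-MFN shift currently
done in restore_all_guest would remain the caller's job.

/*
 * Hypothetical helper, not part of the posted patch: copy the
 * guest-controlled L4 slots from one root page table to another without
 * relying on the direct map, by mapping both tables first.
 */
void sync_guest_root_pgt(mfn_t dst_mfn, mfn_t src_mfn)
{
    l4_pgentry_t *dst = map_xen_pagetable(dst_mfn);
    l4_pgentry_t *src = map_xen_pagetable(src_mfn);
    unsigned int i;

    /* The shadow-linear slot is copied on its own, as in the assembly. */
    dst[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
        src[l4_table_offset(SH_LINEAR_PT_VIRT_START)];

    /* Guest slots below the Xen range... */
    for ( i = 0; i < ROOT_PAGETABLE_FIRST_XEN_SLOT; i++ )
        dst[i] = src[i];

    /* ... and guest slots above it. */
    for ( i = ROOT_PAGETABLE_LAST_XEN_SLOT + 1; i < ROOT_PAGETABLE_ENTRIES;
          i++ )
        dst[i] = src[i];

    unmap_xen_pagetable(src);
    unmap_xen_pagetable(dst);
}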