Message ID | 27c7736ec643dd0dd3cf469e6dc57f9d36379dcb.1582281718.git.hongyxia@amazon.com
---|---
State | Superseded
Series | x86/mm: switch to new APIs in arch_init_memory
Hi Hongyan,

On 21/02/2020 10:42, Hongyan Xia wrote:
> From: Wei Liu <wei.liu2@citrix.com>
>
> Since we now map and unmap Xen PTE pages, we would like to track the
> lifetime of mappings so that 1) we do not dereference memory through a
> variable after it is unmapped, 2) we do not unmap more than once.
> Therefore, we introduce the UNMAP_DOMAIN_PAGE macro to nullify the
> variable after unmapping, and ignore NULL in unmap_domain_page.

If we want to support NULL in unmap_domain_page() then we would need to
replicate the change for Arm as well.

Cheers,

>
> Signed-off-by: Wei Liu <wei.liu2@citrix.com>
> Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
> ---
>  xen/arch/x86/domain_page.c    |  2 +-
>  xen/arch/x86/mm.c             | 14 ++++++++------
>  xen/include/xen/domain_page.h |  5 +++++
>  3 files changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index dd32712d2f..b03728e18e 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -181,7 +181,7 @@ void unmap_domain_page(const void *ptr)
>      unsigned long va = (unsigned long)ptr, mfn, flags;
>      struct vcpu_maphash_entry *hashent;
>
> -    if ( va >= DIRECTMAP_VIRT_START )
> +    if ( !va || va >= DIRECTMAP_VIRT_START )
>          return;
>
>      ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 70b87c4830..9fcdcde5b7 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -356,19 +356,21 @@ void __init arch_init_memory(void)
>              ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
>              if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
>              {
> -                l3_pgentry_t *l3tab = alloc_xen_pagetable();
> +                mfn_t l3mfn = alloc_xen_pagetable_new();
>
> -                if ( l3tab )
> +                if ( !mfn_eq(l3mfn, INVALID_MFN) )
>                  {
> -                    const l3_pgentry_t *l3idle =
> -                        l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
> +                    l3_pgentry_t *l3idle = map_l3t_from_l4e(
> +                        idle_pg_table[l4_table_offset(split_va)]);
> +                    l3_pgentry_t *l3tab = map_domain_page(l3mfn);
>
>                      for ( i = 0; i < l3_table_offset(split_va); ++i )
>                          l3tab[i] = l3idle[i];
>                      for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
>                          l3tab[i] = l3e_empty();
> -                    split_l4e = l4e_from_mfn(virt_to_mfn(l3tab),
> -                                             __PAGE_HYPERVISOR_RW);
> +                    split_l4e = l4e_from_mfn(l3mfn, __PAGE_HYPERVISOR_RW);
> +                    UNMAP_DOMAIN_PAGE(l3idle);
> +                    UNMAP_DOMAIN_PAGE(l3tab);
>                  }
>                  else
>                      ++root_pgt_pv_xen_slots;
> diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
> index 32669a3339..a182d33b67 100644
> --- a/xen/include/xen/domain_page.h
> +++ b/xen/include/xen/domain_page.h
> @@ -72,4 +72,9 @@ static inline void unmap_domain_page_global(const void *va) {};
>
>  #endif /* !CONFIG_DOMAIN_PAGE */
>
> +#define UNMAP_DOMAIN_PAGE(p) do {  \
> +    unmap_domain_page(p);          \
> +    (p) = NULL;                    \
> +} while ( false )
> +
>  #endif /* __XEN_DOMAIN_PAGE_H__ */
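For illustration, a minimal caller-side sketch of the lifetime pattern the commit message describes; zap_l1_slot() is a hypothetical helper, not part of the patch, and assumes mfn refers to a valid, writable L1 page table:

/* Hypothetical example, not part of the patch. */
static void zap_l1_slot(mfn_t mfn, unsigned int slot)
{
    l1_pgentry_t *pl1e = map_domain_page(mfn);

    pl1e[slot] = l1e_empty();  /* dereference only while the mapping is live */

    UNMAP_DOMAIN_PAGE(pl1e);   /* unmaps and sets pl1e to NULL */
    UNMAP_DOMAIN_PAGE(pl1e);   /* a no-op once unmap_domain_page() ignores NULL */
}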
On 21.02.2020 11:42, Hongyan Xia wrote:
> From: Wei Liu <wei.liu2@citrix.com>
>
> Since we now map and unmap Xen PTE pages, we would like to track the
> lifetime of mappings so that 1) we do not dereference memory through a
> variable after it is unmapped, 2) we do not unmap more than once.
> Therefore, we introduce the UNMAP_DOMAIN_PAGE macro to nullify the
> variable after unmapping, and ignore NULL in unmap_domain_page.

To me this reads as if it was a 2nd paragraph explaining what needs
doing in order to achieve the main goal of the patch (supposedly
described in a 1st paragraph).

> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -181,7 +181,7 @@ void unmap_domain_page(const void *ptr)
>      unsigned long va = (unsigned long)ptr, mfn, flags;
>      struct vcpu_maphash_entry *hashent;
>
> -    if ( va >= DIRECTMAP_VIRT_START )
> +    if ( !va || va >= DIRECTMAP_VIRT_START )
>          return;

If we are to go with this, then I agree with Julien that this needs
mirroring to Arm, allowing common code to assume this behavior.
However, an alternative to this is to make ...

> --- a/xen/include/xen/domain_page.h
> +++ b/xen/include/xen/domain_page.h
> @@ -72,4 +72,9 @@ static inline void unmap_domain_page_global(const void *va) {};
>
>  #endif /* !CONFIG_DOMAIN_PAGE */
>
> +#define UNMAP_DOMAIN_PAGE(p) do {  \
> +    unmap_domain_page(p);          \
> +    (p) = NULL;                    \
> +} while ( false )

... this avoid the call, leaving the function itself untouched.

Jan
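A minimal sketch of the alternative Jan outlines (not what the posted patch does): keep the NULL check inside the macro so unmap_domain_page() itself stays untouched on every architecture:

/*
 * Sketch only: the review alternative, not the posted patch.  Skipping
 * the call for a NULL pointer here avoids any per-arch change to
 * unmap_domain_page().
 */
#define UNMAP_DOMAIN_PAGE(p) do {   \
    if ( p )                        \
        unmap_domain_page(p);       \
    (p) = NULL;                     \
} while ( false )

The do { ... } while ( false ) wrapper keeps the macro usable as a single statement, e.g. in an unbraced if/else.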
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index dd32712d2f..b03728e18e 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -181,7 +181,7 @@ void unmap_domain_page(const void *ptr)
     unsigned long va = (unsigned long)ptr, mfn, flags;
     struct vcpu_maphash_entry *hashent;
 
-    if ( va >= DIRECTMAP_VIRT_START )
+    if ( !va || va >= DIRECTMAP_VIRT_START )
         return;
 
     ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 70b87c4830..9fcdcde5b7 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -356,19 +356,21 @@ void __init arch_init_memory(void)
             ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
             if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
             {
-                l3_pgentry_t *l3tab = alloc_xen_pagetable();
+                mfn_t l3mfn = alloc_xen_pagetable_new();
 
-                if ( l3tab )
+                if ( !mfn_eq(l3mfn, INVALID_MFN) )
                 {
-                    const l3_pgentry_t *l3idle =
-                        l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
+                    l3_pgentry_t *l3idle = map_l3t_from_l4e(
+                        idle_pg_table[l4_table_offset(split_va)]);
+                    l3_pgentry_t *l3tab = map_domain_page(l3mfn);
 
                     for ( i = 0; i < l3_table_offset(split_va); ++i )
                         l3tab[i] = l3idle[i];
                     for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
                         l3tab[i] = l3e_empty();
-                    split_l4e = l4e_from_mfn(virt_to_mfn(l3tab),
-                                             __PAGE_HYPERVISOR_RW);
+                    split_l4e = l4e_from_mfn(l3mfn, __PAGE_HYPERVISOR_RW);
+                    UNMAP_DOMAIN_PAGE(l3idle);
+                    UNMAP_DOMAIN_PAGE(l3tab);
                 }
                 else
                     ++root_pgt_pv_xen_slots;
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 32669a3339..a182d33b67 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -72,4 +72,9 @@ static inline void unmap_domain_page_global(const void *va) {};
 
 #endif /* !CONFIG_DOMAIN_PAGE */
 
+#define UNMAP_DOMAIN_PAGE(p) do {  \
+    unmap_domain_page(p);          \
+    (p) = NULL;                    \
+} while ( false )
+
 #endif /* __XEN_DOMAIN_PAGE_H__ */