Message ID | 20220624091146.35716-6-julien@xen.org (mailing list archive) |
---|---|
State | Superseded |
Series | xen/arm: mm: Bunch of clean-ups |
Hi Julien,

On 24.06.2022 11:11, Julien Grall wrote:
> From: Julien Grall <jgrall@amazon.com>
>
> At the moment, the domheap mappings initialization is done separately for
> the boot CPU and secondary CPUs. The main difference is for the former
> the pages are part of Xen binary whilst for the latter they are
> dynamically allocated.
>
> It would be good to have a single helper so it is easier to rework
> on the domheap is initialized.
>
> For CPU0, we still need to use pre-allocated pages because the
> allocators may use domain_map_page(), so we need to have the domheap
> area ready first. But we can still delay the initialization to setup_mm().
>
> Introduce a new helper domheap_mapping_init() that will be called
FWICS the function name is init_domheap_mappings.
> from setup_mm() for the boot CPU and from init_secondary_pagetables()
> for secondary CPUs.
>
> Signed-off-by: Julien Grall <jgrall@amazon.com>
> ---
>  xen/arch/arm/include/asm/arm32/mm.h |  2 +
>  xen/arch/arm/mm.c                   | 92 +++++++++++++++++++----------
>  xen/arch/arm/setup.c                |  8 +++
>  3 files changed, 71 insertions(+), 31 deletions(-)
>
> diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h
> index 6b039d9ceaa2..575373aeb985 100644
> --- a/xen/arch/arm/include/asm/arm32/mm.h
> +++ b/xen/arch/arm/include/asm/arm32/mm.h
> @@ -10,6 +10,8 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
>      return false;
>  }
>
> +bool init_domheap_mappings(unsigned int cpu);
> +
>  #endif /* __ARM_ARM32_MM_H__ */
>
>  /*
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 20733afebce4..995aa1e4480e 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -372,6 +372,58 @@ void clear_fixmap(unsigned map)
>  }
>
>  #ifdef CONFIG_DOMAIN_PAGE
> +/*
> + * Prepare the area that will be used to map domheap pages. They are
> + * mapped in 2MB chunks, so we need to allocate the page-tables up to
> + * the 2nd level.
> + *
> + * The caller should make sure the root page-table for @cpu has been
> + * been allocated.
Second 'been' not needed.
> + */
> +bool init_domheap_mappings(unsigned int cpu)
> +{
> +    unsigned int order = get_order_from_pages(DOMHEAP_SECOND_PAGES);
> +    lpae_t *root = per_cpu(xen_pgtable, cpu);
> +    unsigned int i, first_idx;
> +    lpae_t *domheap;
> +    mfn_t mfn;
> +
> +    ASSERT(root);
> +    ASSERT(!per_cpu(xen_dommap, cpu));
> +
> +    /*
> +     * The domheap for cpu0 is before the heap is initialized. So we
> +     * need to use pre-allocated pages.
> +     */
> +    if ( !cpu )
> +        domheap = cpu0_dommap;
> +    else
> +        domheap = alloc_xenheap_pages(order, 0);
> +
> +    if ( !domheap )
> +        return false;
> +
> +    /* Ensure the domheap has no stray mappings */
> +    memset(domheap, 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE);
> +
> +    /*
> +     * Update the first level mapping to reference the local CPUs
> +     * domheap mapping pages.
> +     */
> +    mfn = virt_to_mfn(domheap);
> +    first_idx = first_table_offset(DOMHEAP_VIRT_START);
> +    for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
> +    {
> +        lpae_t pte = mfn_to_xen_entry(mfn_add(mfn, i), MT_NORMAL);
I might have missed sth but shouldn't 'i' be multiplied by XEN_PT_LPAE_ENTRIES?

Cheers,
Michal
Hi Michal,

On 27/06/2022 08:24, Michal Orzel wrote:
> On 24.06.2022 11:11, Julien Grall wrote:
>> From: Julien Grall <jgrall@amazon.com>
>>
>> At the moment, the domheap mappings initialization is done separately for
>> the boot CPU and secondary CPUs. The main difference is for the former
>> the pages are part of Xen binary whilst for the latter they are
>> dynamically allocated.
>>
>> It would be good to have a single helper so it is easier to rework
>> on the domheap is initialized.
>>
>> For CPU0, we still need to use pre-allocated pages because the
>> allocators may use domain_map_page(), so we need to have the domheap
>> area ready first. But we can still delay the initialization to setup_mm().
>>
>> Introduce a new helper domheap_mapping_init() that will be called
> FWICS the function name is init_domheap_mappings.

I will update it.

>
>> from setup_mm() for the boot CPU and from init_secondary_pagetables()
>> for secondary CPUs.
>>
>> Signed-off-by: Julien Grall <jgrall@amazon.com>
>> ---
>>  xen/arch/arm/include/asm/arm32/mm.h |  2 +
>>  xen/arch/arm/mm.c                   | 92 +++++++++++++++++++----------
>>  xen/arch/arm/setup.c                |  8 +++
>>  3 files changed, 71 insertions(+), 31 deletions(-)
>>
>> diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h
>> index 6b039d9ceaa2..575373aeb985 100644
>> --- a/xen/arch/arm/include/asm/arm32/mm.h
>> +++ b/xen/arch/arm/include/asm/arm32/mm.h
>> @@ -10,6 +10,8 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
>>      return false;
>>  }
>>
>> +bool init_domheap_mappings(unsigned int cpu);
>> +
>>  #endif /* __ARM_ARM32_MM_H__ */
>>
>>  /*
>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>> index 20733afebce4..995aa1e4480e 100644
>> --- a/xen/arch/arm/mm.c
>> +++ b/xen/arch/arm/mm.c
>> @@ -372,6 +372,58 @@ void clear_fixmap(unsigned map)
>>  }
>>
>>  #ifdef CONFIG_DOMAIN_PAGE
>> +/*
>> + * Prepare the area that will be used to map domheap pages. They are
>> + * mapped in 2MB chunks, so we need to allocate the page-tables up to
>> + * the 2nd level.
>> + *
>> + * The caller should make sure the root page-table for @cpu has been
>> + * been allocated.
> Second 'been' not needed.

I will drop it.

>
>> + */
>> +bool init_domheap_mappings(unsigned int cpu)
>> +{
>> +    unsigned int order = get_order_from_pages(DOMHEAP_SECOND_PAGES);
>> +    lpae_t *root = per_cpu(xen_pgtable, cpu);
>> +    unsigned int i, first_idx;
>> +    lpae_t *domheap;
>> +    mfn_t mfn;
>> +
>> +    ASSERT(root);
>> +    ASSERT(!per_cpu(xen_dommap, cpu));
>> +
>> +    /*
>> +     * The domheap for cpu0 is before the heap is initialized. So we
>> +     * need to use pre-allocated pages.
>> +     */
>> +    if ( !cpu )
>> +        domheap = cpu0_dommap;
>> +    else
>> +        domheap = alloc_xenheap_pages(order, 0);
>> +
>> +    if ( !domheap )
>> +        return false;
>> +
>> +    /* Ensure the domheap has no stray mappings */
>> +    memset(domheap, 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE);
>> +
>> +    /*
>> +     * Update the first level mapping to reference the local CPUs
>> +     * domheap mapping pages.
>> +     */
>> +    mfn = virt_to_mfn(domheap);
>> +    first_idx = first_table_offset(DOMHEAP_VIRT_START);
>> +    for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
>> +    {
>> +        lpae_t pte = mfn_to_xen_entry(mfn_add(mfn, i), MT_NORMAL);
> I might have missed sth but shouldn't 'i' be multiplied by XEN_PT_LPAE_ENTRIES?

Each table takes one page. As we are dealing with an MFN, we only need to
increment it by 1 each time.

The reason the previous code was multiplying by XEN_PT_LPAE_ENTRIES is that
it was doing the arithmetic on a virtual address of type lpae_t * and then
calling virt_to_mfn(). But this is pointless as the domheap page-tables are,
so far, both virtually and physically contiguous.

Cheers,
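[Editor's illustration] To make the pointer-versus-MFN arithmetic discussed in the reply above concrete, here is a minimal, self-contained sketch. It is not Xen code: lpae_t, virt_to_mfn(), NR_TABLES and the constants are simplified stand-ins for the real definitions, and virt_to_mfn() is modelled as a plain shift on a page-aligned, identity-mapped buffer. Under those assumptions it checks that, when the second-level tables are virtually and physically contiguous, advancing an lpae_t pointer by XEN_PT_LPAE_ENTRIES entries is the same as adding 1 to the MFN.

/*
 * Standalone illustration (not Xen code): with 4KB pages and 8-byte LPAE
 * entries, one page holds 512 entries, so advancing an lpae_t pointer by
 * XEN_PT_LPAE_ENTRIES moves exactly one page, i.e. one MFN, forward.
 * The constants below mirror the assumed Xen values.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT          12
#define PAGE_SIZE           (1UL << PAGE_SHIFT)
#define XEN_PT_LPAE_ENTRIES 512   /* PAGE_SIZE / sizeof(lpae_t) */
#define NR_TABLES           2     /* stands in for DOMHEAP_SECOND_PAGES */

typedef uint64_t lpae_t;          /* simplified: a real entry is a 64-bit union */

static unsigned long virt_to_mfn(const void *va)
{
    /* Identity "translation", fine for a contiguous buffer in this demo. */
    return (unsigned long)((uintptr_t)va >> PAGE_SHIFT);
}

int main(void)
{
    /* Pretend this is the physically contiguous domheap table allocation. */
    static lpae_t domheap[NR_TABLES * XEN_PT_LPAE_ENTRIES]
        __attribute__((aligned(PAGE_SIZE)));
    unsigned long base = virt_to_mfn(domheap);
    unsigned int i;

    for ( i = 0; i < NR_TABLES; i++ )
    {
        /* Old style: convert a pointer advanced by whole tables. */
        unsigned long old_way = virt_to_mfn(domheap + i * XEN_PT_LPAE_ENTRIES);
        /* New style: increment the base MFN by one per table. */
        unsigned long new_way = base + i;

        assert(old_way == new_way);
        printf("table %u -> mfn %#lx\n", i, new_way);
    }

    return 0;
}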
diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h
index 6b039d9ceaa2..575373aeb985 100644
--- a/xen/arch/arm/include/asm/arm32/mm.h
+++ b/xen/arch/arm/include/asm/arm32/mm.h
@@ -10,6 +10,8 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
     return false;
 }
 
+bool init_domheap_mappings(unsigned int cpu);
+
 #endif /* __ARM_ARM32_MM_H__ */
 
 /*
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 20733afebce4..995aa1e4480e 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -372,6 +372,58 @@ void clear_fixmap(unsigned map)
 }
 
 #ifdef CONFIG_DOMAIN_PAGE
+/*
+ * Prepare the area that will be used to map domheap pages. They are
+ * mapped in 2MB chunks, so we need to allocate the page-tables up to
+ * the 2nd level.
+ *
+ * The caller should make sure the root page-table for @cpu has been
+ * been allocated.
+ */
+bool init_domheap_mappings(unsigned int cpu)
+{
+    unsigned int order = get_order_from_pages(DOMHEAP_SECOND_PAGES);
+    lpae_t *root = per_cpu(xen_pgtable, cpu);
+    unsigned int i, first_idx;
+    lpae_t *domheap;
+    mfn_t mfn;
+
+    ASSERT(root);
+    ASSERT(!per_cpu(xen_dommap, cpu));
+
+    /*
+     * The domheap for cpu0 is before the heap is initialized. So we
+     * need to use pre-allocated pages.
+     */
+    if ( !cpu )
+        domheap = cpu0_dommap;
+    else
+        domheap = alloc_xenheap_pages(order, 0);
+
+    if ( !domheap )
+        return false;
+
+    /* Ensure the domheap has no stray mappings */
+    memset(domheap, 0, DOMHEAP_SECOND_PAGES * PAGE_SIZE);
+
+    /*
+     * Update the first level mapping to reference the local CPUs
+     * domheap mapping pages.
+     */
+    mfn = virt_to_mfn(domheap);
+    first_idx = first_table_offset(DOMHEAP_VIRT_START);
+    for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
+    {
+        lpae_t pte = mfn_to_xen_entry(mfn_add(mfn, i), MT_NORMAL);
+        pte.pt.table = 1;
+        write_pte(&root[first_idx + i], pte);
+    }
+
+    per_cpu(xen_dommap, cpu) = domheap;
+
+    return true;
+}
+
 void *map_domain_page_global(mfn_t mfn)
 {
     return vmap(&mfn, 1);
@@ -633,16 +685,6 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
             p[i].pt.xn = 0;
     }
 
-#ifdef CONFIG_ARM_32
-    for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
-    {
-        p[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)]
-            = pte_of_xenaddr((uintptr_t)(cpu0_dommap +
-                                         i * XEN_PT_LPAE_ENTRIES));
-        p[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)].pt.table = 1;
-    }
-#endif
-
     /* Break up the Xen mapping into 4k pages and protect them separately.
      */
     for ( i = 0; i < XEN_PT_LPAE_ENTRIES; i++ )
     {
@@ -686,7 +728,6 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
 
 #ifdef CONFIG_ARM_32
     per_cpu(xen_pgtable, 0) = cpu0_pgtable;
-    per_cpu(xen_dommap, 0) = cpu0_dommap;
 #endif
 }
 
@@ -719,39 +760,28 @@ int init_secondary_pagetables(int cpu)
 #else
 int init_secondary_pagetables(int cpu)
 {
-    lpae_t *first, *domheap, pte;
-    int i;
+    lpae_t *first;
 
     first = alloc_xenheap_page(); /* root == first level on 32-bit 3-level trie */
-    domheap = alloc_xenheap_pages(get_order_from_pages(DOMHEAP_SECOND_PAGES), 0);
 
-    if ( domheap == NULL || first == NULL )
+    if ( !first )
     {
-        printk("Not enough free memory for secondary CPU%d pagetables\n", cpu);
-        free_xenheap_pages(domheap, get_order_from_pages(DOMHEAP_SECOND_PAGES));
-        free_xenheap_page(first);
+        printk("CPU%u: Unable to allocate the first page-table\n", cpu);
         return -ENOMEM;
     }
 
     /* Initialise root pagetable from root of boot tables */
     memcpy(first, cpu0_pgtable, PAGE_SIZE);
+    per_cpu(xen_pgtable, cpu) = first;
 
-    /* Ensure the domheap has no stray mappings */
-    memset(domheap, 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
-
-    /* Update the first level mapping to reference the local CPUs
-     * domheap mapping pages. */
-    for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
+    if ( !init_domheap_mappings(cpu) )
     {
-        pte = mfn_to_xen_entry(virt_to_mfn(domheap + i * XEN_PT_LPAE_ENTRIES),
-                               MT_NORMAL);
-        pte.pt.table = 1;
-        write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
+        printk("CPU%u: Unable to prepare the domheap page-tables\n", cpu);
+        per_cpu(xen_pgtable, cpu) = NULL;
+        free_xenheap_page(first);
+        return -ENOMEM;
     }
 
-    per_cpu(xen_pgtable, cpu) = first;
-    per_cpu(xen_dommap, cpu) = domheap;
-
     clear_boot_pagetables();
 
     /* Set init_ttbr for this CPU coming up */
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 577c54e6fbfa..31574996f36d 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -783,6 +783,14 @@ static void __init setup_mm(void)
     setup_frametable_mappings(ram_start, ram_end);
     max_page = PFN_DOWN(ram_end);
 
+    /*
+     * The allocators may need to use map_domain_page() (such as for
+     * scrubbing pages). So we need to prepare the domheap area first.
+     */
+    if ( !init_domheap_mappings(smp_processor_id()) )
+        panic("CPU%u: Unable to prepare the domheap page-tables\n",
+              smp_processor_id());
+
     /* Add xenheap memory that was not already added to the boot allocator. */
     init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
                        mfn_to_maddr(xenheap_mfn_end));
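[Editor's illustration] One detail of the rewritten loop that may not be obvious from the diff: the removed arm32 code indexed the first-level table with first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE), while init_domheap_mappings() computes first_table_offset(DOMHEAP_VIRT_START) once and then uses first_idx + i. The two agree as long as the domheap area starts on a first-level boundary. The standalone sketch below is not Xen code: the constants are assumed arm32 LPAE values (1GB first-level granularity, 512 entries per table, a 2GB domheap area at 0x80000000) and first_table_offset() is a simplified, hypothetical model of the real macro.

/*
 * Standalone illustration (not Xen code): check that recomputing the
 * first-level offset per iteration (old code) matches computing it once
 * and adding i (new code), given a first-level-aligned domheap start.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FIRST_SHIFT           30                  /* assumed: 1GB per first-level entry */
#define FIRST_SIZE            (1ULL << FIRST_SHIFT)
#define XEN_PT_LPAE_ENTRIES   512                 /* assumed entries per table */
#define DOMHEAP_VIRT_START    0x80000000ULL       /* assumed arm32 domheap start */
#define DOMHEAP_SECOND_PAGES  2                   /* assumed: 2GB of domheap mappings */

/* Simplified stand-in for Xen's first_table_offset() macro. */
static unsigned int first_table_offset(uint64_t va)
{
    return (unsigned int)((va >> FIRST_SHIFT) & (XEN_PT_LPAE_ENTRIES - 1));
}

int main(void)
{
    unsigned int first_idx = first_table_offset(DOMHEAP_VIRT_START);
    unsigned int i;

    for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
    {
        /* Old code: recompute the slot from the virtual address each time. */
        unsigned int old_way = first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE);
        /* New code: reuse the base slot and add the table index. */
        unsigned int new_way = first_idx + i;

        assert(old_way == new_way);
        printf("second-level table %u -> first-level slot %u\n", i, new_way);
    }

    return 0;
}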