
[v2,2/4] arm64: mm: Batch dsb and isb when populating pgtables

Message ID 20240404143308.2224141-3-ryan.roberts@arm.com (mailing list archive)
State New, archived
Series Speed up boot with faster linear map creation

Commit Message

Ryan Roberts April 4, 2024, 2:33 p.m. UTC
After removing unnecessary TLBIs, the next bottleneck when creating the
page tables for the linear map is DSB and ISB, which were previously
issued per-pte in __set_pte(). Since we are writing multiple ptes in a
given pte table, we can elide these barriers and insert them once we
have finished writing to the table.

Execution time of map_mem(), which creates the kernel linear map page
tables, was measured on different machines with different RAM configs:

               | Apple M2 VM | Ampere Altra| Ampere Altra| Ampere Altra
               | VM, 16G     | VM, 64G     | VM, 256G    | Metal, 512G
---------------|-------------|-------------|-------------|-------------
               |   ms    (%) |   ms    (%) |   ms    (%) |    ms    (%)
---------------|-------------|-------------|-------------|-------------
before         |   77   (0%) |  431   (0%) | 1727   (0%) |  3796   (0%)
after          |   13 (-84%) |  162 (-62%) |  655 (-62%) |  1656 (-56%)

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
Tested-by: Eric Chanudet <echanude@redhat.com>
---
 arch/arm64/include/asm/pgtable.h |  7 ++++++-
 arch/arm64/mm/mmu.c              | 13 ++++++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)
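
(For context, the barriers being batched are the ones __set_pte() issues after
every write. A minimal sketch of its pre-patch shape, assuming the barriers are
conditional on pte_valid_not_user() as the truncated comment in the diff
suggests; see arch/arm64/include/asm/pgtable.h for the real code:

	static inline void __set_pte(pte_t *ptep, pte_t pte)
	{
		WRITE_ONCE(*ptep, pte);

		/* Only for valid kernel mappings; other paths provide their own barriers. */
		if (pte_valid_not_user(pte)) {
			dsb(ishst);	/* publish the pte write to the table walker */
			isb();		/* ...before any subsequent instruction */
		}
	}

With this patch, init_pte() writes each entry with __set_pte_nosync() and the
dsb(ishst)/isb() pair is issued once at the end of alloc_init_cont_pte()
instead of once per entry.)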

Comments

Mark Rutland April 10, 2024, 10:06 a.m. UTC | #1
On Thu, Apr 04, 2024 at 03:33:06PM +0100, Ryan Roberts wrote:
> After removing unnecessary TLBIs, the next bottleneck when creating the
> page tables for the linear map is DSB and ISB, which were previously
> issued per-pte in __set_pte(). Since we are writing multiple ptes in a
> given pte table, we can elide these barriers and insert them once we
> have finished writing to the table.
> 
> Execution time of map_mem(), which creates the kernel linear map page
> tables, was measured on different machines with different RAM configs:
> 
>                | Apple M2 VM | Ampere Altra| Ampere Altra| Ampere Altra
>                | VM, 16G     | VM, 64G     | VM, 256G    | Metal, 512G
> ---------------|-------------|-------------|-------------|-------------
>                |   ms    (%) |   ms    (%) |   ms    (%) |    ms    (%)
> ---------------|-------------|-------------|-------------|-------------
> before         |   77   (0%) |  431   (0%) | 1727   (0%) |  3796   (0%)
> after          |   13 (-84%) |  162 (-62%) |  655 (-62%) |  1656 (-56%)
> 
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> Tested-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
> Tested-by: Eric Chanudet <echanude@redhat.com>
> ---
>  arch/arm64/include/asm/pgtable.h |  7 ++++++-
>  arch/arm64/mm/mmu.c              | 13 ++++++++++++-
>  2 files changed, 18 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index afdd56d26ad7..105a95a8845c 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -271,9 +271,14 @@ static inline pte_t pte_mkdevmap(pte_t pte)
>  	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
>  }
>  
> -static inline void __set_pte(pte_t *ptep, pte_t pte)
> +static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
>  {
>  	WRITE_ONCE(*ptep, pte);
> +}
> +
> +static inline void __set_pte(pte_t *ptep, pte_t pte)
> +{
> +	__set_pte_nosync(ptep, pte);
>  
>  	/*
>  	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index fd91b5bdb514..dc86dceb0efe 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -178,7 +178,11 @@ static pte_t *init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
>  	do {
>  		pte_t old_pte = __ptep_get(ptep);
>  
> -		__set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
> +		/*
> +		 * Required barriers to make this visible to the table walker
> +		 * are deferred to the end of alloc_init_cont_pte().
> +		 */
> +		__set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));
>  
>  		/*
>  		 * After the PTE entry has been populated once, we
> @@ -234,6 +238,13 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  	} while (addr = next, addr != end);
>  
>  	pte_clear_fixmap();
> +
> +	/*
> +	 * Ensure all previous pgtable writes are visible to the table walker.
> +	 * See init_pte().
> +	 */
> +	dsb(ishst);
> +	isb();

Hmm... currently the call to pte_clear_fixmap() alone should be sufficient,
since that needs to update the PTE for the fixmap slot, then do maintenance for
that.

So we could avoid the addition of the dsb+isb here, and have a comment:

	/*
	 * Note: barriers and maintenance necessary to clear the fixmap slot
	 * ensure that all previous pgtable writes are visible to the table
	 * walker.
	 */
	pte_clear_fixmap();

... which'd be fine as long as we keep this fixmap clearing rather than trying
to do that lazily as in patch 4.
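
(For reference, the maintenance in question comes from tearing down the fixmap
slot. A rough sketch of that path, assuming pte_clear_fixmap() boils down to
__set_fixmap() with an empty pgprot as in the current arm64 fixmap code; the
helper names below come from that assumption, not from this series:

	void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
	{
		unsigned long addr = __fix_to_virt(idx);
		pte_t *ptep = fixmap_pte(addr);

		if (pgprot_val(flags)) {
			__set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
		} else {
			/* Clearing the fixmap slot... */
			__pte_clear(&init_mm, addr, ptep);
			/*
			 * ...ends in TLB maintenance, whose dsb(ishst) before
			 * the TLBI (and dsb(ish)/isb() after) also publish the
			 * earlier batched pte writes to the table walker.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		}
	}
)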

Mark.

>  }
>  
>  static pmd_t *init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
> -- 
> 2.25.1
>
Ryan Roberts April 10, 2024, 10:25 a.m. UTC | #2
On 10/04/2024 11:06, Mark Rutland wrote:
> On Thu, Apr 04, 2024 at 03:33:06PM +0100, Ryan Roberts wrote:
>> After removing unnecessary TLBIs, the next bottleneck when creating the
>> page tables for the linear map is DSB and ISB, which were previously
>> issued per-pte in __set_pte(). Since we are writing multiple ptes in a
>> given pte table, we can elide these barriers and insert them once we
>> have finished writing to the table.
>>
>> Execution time of map_mem(), which creates the kernel linear map page
>> tables, was measured on different machines with different RAM configs:
>>
>>                | Apple M2 VM | Ampere Altra| Ampere Altra| Ampere Altra
>>                | VM, 16G     | VM, 64G     | VM, 256G    | Metal, 512G
>> ---------------|-------------|-------------|-------------|-------------
>>                |   ms    (%) |   ms    (%) |   ms    (%) |    ms    (%)
>> ---------------|-------------|-------------|-------------|-------------
>> before         |   77   (0%) |  431   (0%) | 1727   (0%) |  3796   (0%)
>> after          |   13 (-84%) |  162 (-62%) |  655 (-62%) |  1656 (-56%)
>>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> Tested-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
>> Tested-by: Eric Chanudet <echanude@redhat.com>
>> ---
>>  arch/arm64/include/asm/pgtable.h |  7 ++++++-
>>  arch/arm64/mm/mmu.c              | 13 ++++++++++++-
>>  2 files changed, 18 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index afdd56d26ad7..105a95a8845c 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -271,9 +271,14 @@ static inline pte_t pte_mkdevmap(pte_t pte)
>>  	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
>>  }
>>  
>> -static inline void __set_pte(pte_t *ptep, pte_t pte)
>> +static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
>>  {
>>  	WRITE_ONCE(*ptep, pte);
>> +}
>> +
>> +static inline void __set_pte(pte_t *ptep, pte_t pte)
>> +{
>> +	__set_pte_nosync(ptep, pte);
>>  
>>  	/*
>>  	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index fd91b5bdb514..dc86dceb0efe 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -178,7 +178,11 @@ static pte_t *init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
>>  	do {
>>  		pte_t old_pte = __ptep_get(ptep);
>>  
>> -		__set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
>> +		/*
>> +		 * Required barriers to make this visible to the table walker
>> +		 * are deferred to the end of alloc_init_cont_pte().
>> +		 */
>> +		__set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));
>>  
>>  		/*
>>  		 * After the PTE entry has been populated once, we
>> @@ -234,6 +238,13 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>  	} while (addr = next, addr != end);
>>  
>>  	pte_clear_fixmap();
>> +
>> +	/*
>> +	 * Ensure all previous pgtable writes are visible to the table walker.
>> +	 * See init_pte().
>> +	 */
>> +	dsb(ishst);
>> +	isb();
> 
> Hmm... currently the call to pte_clear_fixmap() alone should be sufficient,
> since that needs to update the PTE for the fixmap slot, then do maintenance for
> that.

Yes, true...

> 
> So we could avoid the addition of the dsb+isb here, and have a comment:
> 
> 	/*
> 	 * Note: barriers and maintenance necessary to clear the fixmap slot
> 	 * ensure that all previous pgtable writes are visible to the table
> 	 * walker.
> 	 */
> 	pte_clear_fixmap();
> 
> ... which'd be fine as long as we keep this fixmap clearing rather than trying
> to do that lazily as in patch 4.

But it isn't patch 4 that breaks it, it's patch 3. Once we have abstracted
pte_clear_fixmap() into the ops->unmap() call, for the "late" ops, unmap is a
noop. I guess the best solution there would be to require that unmap() always
issues these barriers.

I'll do as you suggest for this patch. If we want to keep patch 3, then I'll add
the barriers for all unmap() impls.
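
(To illustrate the option being discussed: a purely hypothetical sketch of what
requiring the barriers in every unmap() implementation might look like. The
function names below are assumptions based on the description of patch 3, which
is not part of this mail:

	/* Hypothetical shapes, not the actual patch 3 code. */
	static void early_unmap(void)
	{
		/* The fixmap teardown's TLB maintenance already includes the barriers. */
		pte_clear_fixmap();
	}

	static void late_unmap(void)
	{
		/*
		 * Nothing to unmap for the "late" ops, so issue the barriers
		 * explicitly to publish the batched pgtable writes to the
		 * table walker.
		 */
		dsb(ishst);
		isb();
	}
)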

> 
> Mark.
> 
>>  }
>>  
>>  static pmd_t *init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
>> -- 
>> 2.25.1
>>
Mark Rutland April 10, 2024, 11:06 a.m. UTC | #3
On Wed, Apr 10, 2024 at 11:25:10AM +0100, Ryan Roberts wrote:
> On 10/04/2024 11:06, Mark Rutland wrote:
> > On Thu, Apr 04, 2024 at 03:33:06PM +0100, Ryan Roberts wrote:
> >> @@ -234,6 +238,13 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
> >>  	} while (addr = next, addr != end);
> >>  
> >>  	pte_clear_fixmap();
> >> +
> >> +	/*
> >> +	 * Ensure all previous pgtable writes are visible to the table walker.
> >> +	 * See init_pte().
> >> +	 */
> >> +	dsb(ishst);
> >> +	isb();
> > 
> > Hmm... currently the call to pte_clear_fixmap() alone should be sufficient,
> > since that needs to update the PTE for the fixmap slot, then do maintenance for
> > that.
> 
> Yes, true...
> 
> > 
> > So we could avoid the addition of the dsb+isb here, and have a comment:
> > 
> > 	/*
> > 	 * Note: barriers and maintenance necessary to clear the fixmap slot
> > 	 * ensure that all previous pgtable writes are visible to the table
> > 	 * walker.
> > 	 */
> > 	pte_clear_fixmap();
> > 
> > ... which'd be fine as long as we keep this fixmap clearing rather than trying
> > to do that lazily as in patch 4.
> 
> But it isn't patch 4 that breaks it, it's patch 3. Once we have abstracted
> pte_clear_fixmap() into the ops->unmap() call, for the "late" ops, unmap is a
> noop.

Ah, yep; I hadn't spotted that yet.

> I guess the best solution there would be to require that unmap() always
> issues these barriers.
> 
> I'll do as you suggest for this patch. If we want to keep patch 3, then I'll add
> the barriers for all unmap() impls.

Thanks. It's going to take me a bit longer to chew through patches 3 and 4, but
I will try to get through those soon.

For now a slightly simpler option would be to have patch 3 introduce the
DSB+ISB as above rather than in each of the unmap() impls.

Mark.

Patch

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index afdd56d26ad7..105a95a8845c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -271,9 +271,14 @@  static inline pte_t pte_mkdevmap(pte_t pte)
 	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
 }
 
-static inline void __set_pte(pte_t *ptep, pte_t pte)
+static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
 {
 	WRITE_ONCE(*ptep, pte);
+}
+
+static inline void __set_pte(pte_t *ptep, pte_t pte)
+{
+	__set_pte_nosync(ptep, pte);
 
 	/*
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index fd91b5bdb514..dc86dceb0efe 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -178,7 +178,11 @@  static pte_t *init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
 	do {
 		pte_t old_pte = __ptep_get(ptep);
 
-		__set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
+		/*
+		 * Required barriers to make this visible to the table walker
+		 * are deferred to the end of alloc_init_cont_pte().
+		 */
+		__set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));
 
 		/*
 		 * After the PTE entry has been populated once, we
@@ -234,6 +238,13 @@  static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 	} while (addr = next, addr != end);
 
 	pte_clear_fixmap();
+
+	/*
+	 * Ensure all previous pgtable writes are visible to the table walker.
+	 * See init_pte().
+	 */
+	dsb(ishst);
+	isb();
 }
 
 static pmd_t *init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,