[10/7] riscv: Implement the new page table range API

Message ID 20230215000446.1655635-2-willy@infradead.org (mailing list archive)
State Superseded
Delegated to: Palmer Dabbelt

Commit Message

Matthew Wilcox Feb. 15, 2023, 12:04 a.m. UTC
Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().

The PG_dcache_clean flag changes from being a per-page bit to being a
per-folio bit.
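
For illustration only (this caller is hypothetical, not part of this
patch), a user of the new API can establish all of a folio's PTEs with
one call pair instead of a per-page loop:

	/*
	 * Hypothetical caller: map every page of 'folio' starting at
	 * 'addr'.  set_ptes() advances the PFN in the pte template by
	 * one page per iteration, so a single pte_t covers the folio.
	 */
	static void map_whole_folio(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    struct folio *folio)
	{
		unsigned int nr = folio_nr_pages(folio);
		pte_t pte = mk_pte(folio_page(folio, 0), vma->vm_page_prot);

		set_ptes(vma->vm_mm, addr, ptep, pte, nr);
		update_mmu_cache_range(vma, addr, ptep, nr);
	}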

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/riscv/include/asm/cacheflush.h | 19 +++++++++----------
 arch/riscv/include/asm/pgtable.h    | 25 ++++++++++++++++++-------
 arch/riscv/mm/cacheflush.c          | 11 ++---------
 3 files changed, 29 insertions(+), 26 deletions(-)

Comments

Yin Fengwei Feb. 15, 2023, 8:38 a.m. UTC | #1
On Wed, 2023-02-15 at 00:04 +0000, Matthew Wilcox (Oracle) wrote:
> Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().
> 
> The PG_dcache_clean flag changes from being a per-page bit to being a
> per-folio bit.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> [...]
> -static inline void update_mmu_cache(struct vm_area_struct *vma,
> -	unsigned long address, pte_t *ptep)
> +static inline void update_mmu_cache_range(struct vm_area_struct *vma,
> +		unsigned long address, pte_t *ptep, unsigned int nr)
> [...]
> -	flush_tlb_page(vma, address);
> +	flush_tlb_range(vma, address, address + nr * PAGE_SIZE);

The riscv flush_tlb_range() looks a little strange to me: it passes
__sbi_tlb_flush_range() a stride of PAGE_SIZE, which means that as soon
as (end - start) is larger than the stride, it falls back to
flush_tlb_all().

So this change can end up triggering flush_tlb_all(), whereas the
original flush_tlb_page() only ever flushed a single page.

My understanding is that flush_tlb_page() should be better here, given
that flush_pmd_tlb_range() uses PMD_SIZE as its stride precisely to
avoid flush_tlb_all(). I must be missing something.
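
For reference, the local-hart logic in arch/riscv/mm/tlbflush.c looks
roughly like this (a simplified sketch; the SBI remote-flush and ASID
paths are elided):

	static void __sbi_tlb_flush_range(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long size,
					  unsigned long stride)
	{
		/* one SFENCE.VMA when the range fits in a single stride */
		if (size <= stride)
			local_flush_tlb_page(start);
		else
			local_flush_tlb_all();
	}

	void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			     unsigned long end)
	{
		/* PAGE_SIZE stride: any range wider than one page
		 * takes the local_flush_tlb_all() branch */
		__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
	}

	void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
				 unsigned long end)
	{
		/* PMD_SIZE stride: one huge page still gets a single flush */
		__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
	}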

Regards
Yin, Fengwei

Yin Fengwei Feb. 15, 2023, 12:27 p.m. UTC | #2
On 2/15/2023 4:35 PM, Yin Fengwei wrote:
> On Wed, 2023-02-15 at 00:04 +0000, Matthew Wilcox (Oracle) wrote:
>> Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().
>> [...]
>> -	flush_tlb_page(vma, address);
>> +	flush_tlb_range(vma, address, address + nr * PAGE_SIZE);
>
> The riscv flush_tlb_range() looks a little strange to me: it passes
> __sbi_tlb_flush_range() a stride of PAGE_SIZE, which means that as soon
> as (end - start) is larger than the stride, it falls back to
> flush_tlb_all().
>
> So this change can end up triggering flush_tlb_all(), whereas the
> original flush_tlb_page() only ever flushed a single page.
>
> My understanding is that flush_tlb_page() should be better here, given
> that flush_pmd_tlb_range() uses PMD_SIZE as its stride precisely to
> avoid flush_tlb_all(). I must be missing something.
Ah, a huge page is mapped by a single TLB entry, so PMD_SIZE as the
stride makes sense for flush_pmd_tlb_range().

Regards
Yin, Fengwei 

Alexandre Ghiti Feb. 16, 2023, 8:14 a.m. UTC | #3
Hi Yin,

On 2/15/23 09:38, Yin, Fengwei wrote:
> On Wed, 2023-02-15 at 00:04 +0000, Matthew Wilcox (Oracle) wrote:
>> Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().
>>
>> The PG_dcache_clean flag changes from being a per-page bit to being a
>> per-folio bit.
>>
>> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>> ---
>> [...]
>> -	flush_tlb_page(vma, address);
>> +	flush_tlb_range(vma, address, address + nr * PAGE_SIZE);
> The riscv flush_tlb_range() looks a little strange to me: it passes
> __sbi_tlb_flush_range() a stride of PAGE_SIZE, which means that as soon
> as (end - start) is larger than the stride, it falls back to
> flush_tlb_all().
>
> So this change can end up triggering flush_tlb_all(), whereas the
> original flush_tlb_page() only ever flushed a single page.


Maybe I'm missing something, but update_mmu_cache()'s behaviour is not
changed here: it will always call flush_tlb_page(), since nr == 1, right?

update_mmu_cache_range(), though, will likely end up calling
flush_tlb_all(). I have to admit I'm wondering why we don't just flush
the range of pages instead of flushing everything; I'll look into that.
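
Something along these lines (a completely untested sketch built on the
existing local_flush_tlb_page() helper) would flush only the affected
pages on the local hart instead:

	/* Hypothetical: flush page by page rather than falling back
	 * to a full TLB flush */
	static void local_flush_tlb_range_pages(unsigned long start,
						unsigned long size)
	{
		unsigned long addr;

		for (addr = start; addr < start + size; addr += PAGE_SIZE)
			local_flush_tlb_page(addr);
	}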

Alex


>
> My understanding is that flush_tlb_page() should be better here, given
> that flush_pmd_tlb_range() uses PMD_SIZE as its stride precisely to
> avoid flush_tlb_all(). I must be missing something.
>
> Regards
> Yin, Fengwei
Alexandre Ghiti Feb. 16, 2023, 8:16 a.m. UTC | #4
Hi Matthew,

On 2/15/23 01:04, Matthew Wilcox (Oracle) wrote:
> Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().
>
> The PG_dcache_clean flag changes from being a per-page bit to being a
> per-folio bit.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> [...]

Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>

Thanks,

Alex
Yin Fengwei Feb. 16, 2023, 1:27 p.m. UTC | #5
Hi Alex,

On 2/16/2023 4:14 PM, Alexandre Ghiti wrote:
> Hi Yin,
> 
> On 2/15/23 09:38, Yin, Fengwei wrote:
>> On Wed, 2023-02-15 at 00:04 +0000, Matthew Wilcox (Oracle) wrote:
>>> Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().
>>> [...]
>>> -	flush_tlb_page(vma, address);
>>> +	flush_tlb_range(vma, address, address + nr * PAGE_SIZE);
>> The riscv flush_tlb_range() looks a little strange to me: it passes
>> __sbi_tlb_flush_range() a stride of PAGE_SIZE, which means that as soon
>> as (end - start) is larger than the stride, it falls back to
>> flush_tlb_all().
>>
>> So this change can end up triggering flush_tlb_all(), whereas the
>> original flush_tlb_page() only ever flushed a single page.
>
>
> Maybe I'm missing something, but update_mmu_cache()'s behaviour is not
> changed here: it will always call flush_tlb_page(), since nr == 1, right?
Yes. This is my understanding too. Thanks.


Regards
Yin, Fengwei


Patch

diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 03e3b95ae6da..10e5e96f09b5 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -15,20 +15,19 @@ static inline void local_flush_icache_all(void)
 
 #define PG_dcache_clean PG_arch_1
 
-static inline void flush_dcache_page(struct page *page)
+static inline void flush_dcache_folio(struct folio *folio)
 {
-	/*
-	 * HugeTLB pages are always fully mapped and only head page will be
-	 * set PG_dcache_clean (see comments in flush_icache_pte()).
-	 */
-	if (PageHuge(page))
-		page = compound_head(page);
-
-	if (test_bit(PG_dcache_clean, &page->flags))
-		clear_bit(PG_dcache_clean, &page->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags))
+		clear_bit(PG_dcache_clean, &folio->flags);
 }
+#define flush_dcache_folio flush_dcache_folio
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
+
 /*
  * RISC-V doesn't have an instruction to flush parts of the instruction cache,
  * so instead we just flush the whole thing.
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 13222fd5c4b4..03706c833e70 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -405,8 +405,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 
 /* Commit new configuration to MMU hardware */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr)
 {
 	/*
 	 * The kernel assumes that TLBs don't cache invalid entries, but
@@ -415,8 +415,10 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
 	 */
-	flush_tlb_page(vma, address);
+	flush_tlb_range(vma, address, address + nr * PAGE_SIZE);
 }
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(vma, addr, ptep, 1)
 
 #define __HAVE_ARCH_UPDATE_MMU_TLB
 #define update_mmu_tlb update_mmu_cache
@@ -456,12 +458,21 @@ static inline void __set_pte_at(struct mm_struct *mm,
 	set_pte(ptep, pteval);
 }
 
-static inline void set_pte_at(struct mm_struct *mm,
-	unsigned long addr, pte_t *ptep, pte_t pteval)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pteval, unsigned int nr)
 {
-	page_table_check_ptes_set(mm, addr, ptep, pteval, 1);
-	__set_pte_at(mm, addr, ptep, pteval);
+	page_table_check_ptes_set(mm, addr, ptep, pteval, nr);
+
+	for (;;) {
+		__set_pte_at(mm, addr, ptep, pteval);
+		if (--nr == 0)
+			break;
+		ptep++;
+		addr += PAGE_SIZE;
+		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
+	}
 }
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
 
 static inline void pte_clear(struct mm_struct *mm,
 	unsigned long addr, pte_t *ptep)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 3cc07ed45aeb..b725c3f6f57f 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -81,16 +81,9 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 #ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
+	struct folio *folio = page_folio(pte_page(pte));
 
-	/*
-	 * HugeTLB pages are always fully mapped, so only setting head page's
-	 * PG_dcache_clean flag is enough.
-	 */
-	if (PageHuge(page))
-		page = compound_head(page);
-
-	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
 		flush_icache_all();
 }
 #endif /* CONFIG_MMU */