
[v4,12/12] riscv: mm: Always use an ASID to flush mm contexts

Message ID 20240102220134.3229156-13-samuel.holland@sifive.com
State Superseded
Series riscv: ASID-related and UP-related TLB flush enhancements

Checks

Context Check Description
conchuod/vmtest-for-next-PR success PR summary
conchuod/vmtest-fixes-PR success PR summary
conchuod/patch-12-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh
conchuod/patch-12-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh
conchuod/patch-12-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh
conchuod/patch-12-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-12-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-12-test-6 success .github/scripts/patches/tests/checkpatch.sh
conchuod/patch-12-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh
conchuod/patch-12-test-8 success .github/scripts/patches/tests/header_inline.sh
conchuod/patch-12-test-9 success .github/scripts/patches/tests/kdoc.sh
conchuod/patch-12-test-10 success .github/scripts/patches/tests/module_param.sh
conchuod/patch-12-test-11 success .github/scripts/patches/tests/verify_fixes.sh
conchuod/patch-12-test-12 success .github/scripts/patches/tests/verify_signedoff.sh

Commit Message

Samuel Holland Jan. 2, 2024, 10 p.m. UTC
Even if multiple ASIDs are not supported, using the single-ASID variant
of the sfence.vma instruction preserves TLB entries for global (kernel)
pages. So it is always more efficient to use the single-ASID code path.
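For reference, the benefit comes from the rs2 (ASID) operand of
sfence.vma. A kernel-style sketch of the two local flush forms (not
code taken from this patch):

	/* rs2 == x0: invalidate translations for every address space,
	 * global (G-bit) kernel mappings included. */
	static inline void local_flush_tlb_all(void)
	{
		__asm__ __volatile__ ("sfence.vma" : : : "memory");
	}

	/* Non-zero ASID in rs2: the ISA permits entries for global
	 * mappings to survive, so kernel translations stay in the TLB. */
	static inline void local_flush_tlb_all_asid(unsigned long asid)
	{
		__asm__ __volatile__ ("sfence.vma x0, %0"
				      : : "r" (asid) : "memory");
	}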

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
---

Changes in v4:
 - There is now only one copy of __flush_tlb_range()

Changes in v2:
 - Update both copies of __flush_tlb_range()

 arch/riscv/include/asm/mmu_context.h | 2 --
 arch/riscv/mm/context.c              | 3 +--
 arch/riscv/mm/tlbflush.c             | 3 +--
 3 files changed, 2 insertions(+), 6 deletions(-)

Comments

Jisheng Zhang Jan. 3, 2024, 3:02 p.m. UTC | #1
On Tue, Jan 02, 2024 at 02:00:49PM -0800, Samuel Holland wrote:
> [...]
> diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
> index 7030837adc1a..b0659413a080 100644
> --- a/arch/riscv/include/asm/mmu_context.h
> +++ b/arch/riscv/include/asm/mmu_context.h
> @@ -33,8 +33,6 @@ static inline int init_new_context(struct task_struct *tsk,
>  	return 0;
>  }
>  
> -DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
> -
>  #include <asm-generic/mmu_context.h>
>  
>  #endif /* _ASM_RISCV_MMU_CONTEXT_H */
> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> index 3ca9b653df7d..20057085ab8a 100644
> --- a/arch/riscv/mm/context.c
> +++ b/arch/riscv/mm/context.c
> @@ -18,8 +18,7 @@
>  
>  #ifdef CONFIG_MMU
>  
> -DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
> -
> +static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

One of my optimizations, "riscv: tlb: avoid tlb flushing if fullmm == 1",
will make use of use_asid_allocator, so could we drop this modification?
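
For example, roughly (a hypothetical sketch, not the actual patch;
"fullmm" is the tlb_gather full-mm teardown flag):

	/* Relies on the global DECLARE_STATIC_KEY_FALSE(use_asid_allocator)
	 * from mmu_context.h.  When each mm owns a private ASID, the flush
	 * at full-mm teardown can be skipped: the stale entries become
	 * unreachable, and the ASID is not recycled without a flush. */
	if (static_branch_unlikely(&use_asid_allocator) && fullmm)
		return;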

> [...]
Alexandre Ghiti Jan. 4, 2024, 1:01 p.m. UTC | #2
On Tue, Jan 2, 2024 at 11:01 PM Samuel Holland
<samuel.holland@sifive.com> wrote:
>
> Even if multiple ASIDs are not supported, using the single-ASID variant
> of the sfence.vma instruction preserves TLB entries for global (kernel)
> pages. So it is always more efficient to use the single-ASID code path.
>
> Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
> [...]

You can add:

Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>

Thanks!

Alex
Samuel Holland Jan. 4, 2024, 3:50 p.m. UTC | #3
On 2024-01-03 9:02 AM, Jisheng Zhang wrote:
> On Tue, Jan 02, 2024 at 02:00:49PM -0800, Samuel Holland wrote:
>> [...]
>> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
>> index 3ca9b653df7d..20057085ab8a 100644
>> --- a/arch/riscv/mm/context.c
>> +++ b/arch/riscv/mm/context.c
>> @@ -18,8 +18,7 @@
>>  
>>  #ifdef CONFIG_MMU
>>  
>> -DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
>> -
>> +static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
> 
> One of my optimizations, "riscv: tlb: avoid tlb flushing if fullmm == 1",
> will make use of use_asid_allocator, so could we drop this modification?

Yes, I can leave the global declaration alone for now.
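
So for v5 only the tlbflush.c change will remain; roughly (sketch):

	/* mmu_context.h and context.c stay as-is: */
	DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
	DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

	/* tlbflush.c still drops the branch: */
	asid = cntx2asid(atomic_long_read(&mm->context.id));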

>> [...]

Patch

diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 7030837adc1a..b0659413a080 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,8 +33,6 @@  static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
-
 #include <asm-generic/mmu_context.h>
 
 #endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 3ca9b653df7d..20057085ab8a 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,8 +18,7 @@ 
 
 #ifdef CONFIG_MMU
 
-DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
-
+static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 static unsigned long num_asids;
 
 static atomic_long_t current_version;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 5ec621545c69..39d80f56d292 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -84,8 +84,7 @@  static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 		if (cpumask_empty(cmask))
 			return;
 
-		if (static_branch_unlikely(&use_asid_allocator))
-			asid = cntx2asid(atomic_long_read(&mm->context.id));
+		asid = cntx2asid(atomic_long_read(&mm->context.id));
 	} else {
 		cmask = cpu_online_mask;
 	}