--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,8 +33,6 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
-DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
-
#include <asm-generic/mmu_context.h>
#endif /* _ASM_RISCV_MMU_CONTEXT_H */
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,8 +18,7 @@
#ifdef CONFIG_MMU
-DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
-
+static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
static unsigned long num_asids;
static atomic_long_t current_version;
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -90,8 +90,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
/* check if the tlbflush needs to be sent to other CPUs */
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
- if (static_branch_unlikely(&use_asid_allocator))
- asid = cntx2asid(atomic_long_read(&mm->context.id));
+ asid = cntx2asid(atomic_long_read(&mm->context.id));
} else {
cmask = cpu_online_mask;
broadcast = true;
@@ -122,7 +121,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
{
unsigned long asid = FLUSH_TLB_NO_ASID;
- if (mm && static_branch_unlikely(&use_asid_allocator))
+ if (mm)
asid = cntx2asid(atomic_long_read(&mm->context.id));
local_flush_tlb_range_asid(start, size, stride, asid);
Even if multiple ASIDs are not supported, using the single-ASID variant
of the sfence.vma instruction preserves TLB entries for global (kernel)
pages. So it is always most efficient to use the single-ASID code path.

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
---

Changes in v2:
 - Update both copies of __flush_tlb_range()

 arch/riscv/include/asm/mmu_context.h | 2 --
 arch/riscv/mm/context.c              | 3 +--
 arch/riscv/mm/tlbflush.c             | 5 ++---
 3 files changed, 3 insertions(+), 7 deletions(-)
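For reference, the single-ASID path ultimately issues an sfence.vma that
passes the ASID in rs2. A minimal sketch of that local flush, for
illustration only (the in-tree helper also handles the
FLUSH_TLB_NO_ASID fallback and may differ in detail):

/* Sketch: flush all non-global entries for one ASID on this hart. */
static inline void local_flush_tlb_all_asid_sketch(unsigned long asid)
{
	/*
	 * With rs1 == x0 and a non-x0 rs2, sfence.vma invalidates only
	 * the non-global translations tagged with that ASID; entries
	 * for global (kernel) mappings are preserved.  This is why the
	 * single-ASID variant is never worse than a plain "sfence.vma",
	 * even when the allocator only ever hands out ASID 0.
	 */
	__asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (asid) : "memory");
}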