@@ -40,6 +40,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -56,15 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
	local_flush_tlb_all();
 }
 
-#define flush_tlb_mm(mm) flush_tlb_all()
-#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
-
 /* Flush a range of kernel pages */
 static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
 {
-	flush_tlb_all();
+	local_flush_tlb_all();
 }
 
+#define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
+
 #endif /* _ASM_RISCV_TLBFLUSH_H */
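
That is the whole of the asm/tlbflush.h change: on SMP/MMU configurations flush_tlb_kernel_range() becomes a real cross-hart flush (declared above, implemented in tlbflush.c below), while the !CONFIG_SMP || !CONFIG_MMU fallback merely downgrades from flush_tlb_all() to local_flush_tlb_all(), since a single hart has no one to broadcast to. A minimal sketch of what the header resolves to after the change (wording mine, not lines from the patch):

/*
 * CONFIG_SMP && CONFIG_MMU:
 *	void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 *	// out of line, broadcast to every online hart (tlbflush.c below)
 *
 * !CONFIG_SMP || !CONFIG_MMU:
 *	static inline void flush_tlb_kernel_range(unsigned long start,
 *		unsigned long end)
 *	{
 *		local_flush_tlb_all();	// one hart, a local flush suffices
 *	}
 */
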
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -97,20 +97,27 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
 {
	struct flush_tlb_range_data ftd;
-	struct cpumask *cmask = mm_cpumask(mm);
+	const struct cpumask *cmask;
	unsigned long asid = FLUSH_TLB_NO_ASID;
-	unsigned int cpuid;
	bool broadcast;
 
-	if (cpumask_empty(cmask))
-		return;
+	if (mm) {
+		unsigned int cpuid;
+
+		cmask = mm_cpumask(mm);
+		if (cpumask_empty(cmask))
+			return;
 
-	cpuid = get_cpu();
-	/* check if the tlbflush needs to be sent to other CPUs */
-	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+		cpuid = get_cpu();
+		/* check if the tlbflush needs to be sent to other CPUs */
+		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
 
-	if (static_branch_unlikely(&use_asid_allocator))
-		asid = atomic_long_read(&mm->context.id) & asid_mask;
+		if (static_branch_unlikely(&use_asid_allocator))
+			asid = atomic_long_read(&mm->context.id) & asid_mask;
+	} else {
+		cmask = cpu_online_mask;
+		broadcast = true;
+	}
 
	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
@@ -128,7 +135,8 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
		local_flush_tlb_range_asid(start, size, stride, asid);
	}
 
-	put_cpu();
+	if (mm)
+		put_cpu();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
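
With these two hunks, the mm argument of __flush_tlb_range() doubles as a mode selector: a NULL mm now means "kernel mapping", which targets every online hart with no ASID and skips the get_cpu()/put_cpu() pairing that only guards the sampling of the per-mm cpumask. A sketch of the two resulting call shapes (the callers here are illustrative, not lines from the patch):

	/* user range: harts in mm_cpumask(mm), the mm's ASID when the
	 * allocator is active, get_cpu()/put_cpu() around the check */
	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);

	/* kernel range: mm == NULL selects cpu_online_mask, keeps
	 * FLUSH_TLB_NO_ASID, and forces broadcast to true */
	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);

Broadcasting unconditionally for NULL is the safe choice: kernel mappings are global, so there is no per-mm cpumask to narrow the target set.
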
@@ -179,6 +187,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
	__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
 }
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
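
With the NULL-mm path in place, the SMP flush_tlb_kernel_range() is a one-liner that flushes with a PAGE_SIZE stride instead of falling back to a full flush_tlb_all(). A hypothetical caller, purely for illustration (the function and variable names here are invented, not part of this patch):

	/* after clearing PTEs for a range of kernel VAs, evict the stale
	 * translations on all online harts rather than nuking every TLB */
	static void teardown_kernel_mapping(unsigned long va, unsigned long npages)
	{
		/* ... pte_clear() loop over [va, va + npages * PAGE_SIZE) ... */
		flush_tlb_kernel_range(va, va + npages * PAGE_SIZE);
	}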