@@ -94,5 +94,8 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
{
SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
}
+#else /* CONFIG_RISCV_SBI */
+/* stub for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
+void sbi_remote_fence_i(const unsigned long *hart_mask);
#endif /* CONFIG_RISCV_SBI */
#endif /* _ASM_RISCV_SBI_H */
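The stub declaration above never gets a definition when CONFIG_RISCV_SBI is disabled; it only exists so that call sites guarded by IS_ENABLED(CONFIG_RISCV_SBI) still compile, with the statically-false branch (and with it the unresolved call) discarded before link time. A minimal stand-alone sketch of the same idiom, with made-up names standing in for the kernel symbols:

/* Illustrative userspace sketch of the IS_ENABLED() stub idiom; not kernel code. */
#include <stdio.h>

#define HAVE_FIRMWARE 0			/* stands in for CONFIG_RISCV_SBI=n */

#if HAVE_FIRMWARE
static void firmware_fence(void) { puts("firmware fence"); }
#else
/* Declared but never defined, like sbi_remote_fence_i() above. */
void firmware_fence(void);
#endif

static void local_fence(void) { puts("local fence"); }

int main(void)
{
	if (HAVE_FIRMWARE)
		firmware_fence();	/* statically dead: dropped by the compiler */
	else
		local_fence();
	return 0;
}

Built with gcc -O2 this links even though firmware_fence() has no definition, because the constant-false branch is eliminated; the guarded sbi_remote_fence_i() calls below rely on the same behaviour.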
@@ -10,9 +10,17 @@
#include <asm/sbi.h>
+static void ipi_remote_fence_i(void *info)
+{
+ return local_flush_icache_all();
+}
+
void flush_icache_all(void)
{
- sbi_remote_fence_i(NULL);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_remote_fence_i(NULL);
+ else
+ on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
/*
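When there is no SBI, flush_icache_all() above falls back to on_each_cpu(ipi_remote_fence_i, NULL, 1): each online CPU is sent a cross-call, runs the callback, and the final argument of 1 makes the caller wait until all of them have finished. A rough userspace analogue of that broadcast-and-wait pattern, with threads standing in for CPUs (all names here are illustrative, not kernel API):

/* Userspace analogue of "run a callback everywhere and wait"; not kernel code. */
#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

static void ipi_remote_fence_i_sketch(void *info)
{
	/* Stands in for local_flush_icache_all() on the CPU receiving the IPI. */
	printf("worker %ld: flush local icache\n", (long)info);
}

static void *worker(void *arg)
{
	ipi_remote_fence_i_sketch(arg);
	return NULL;
}

int main(void)
{
	pthread_t threads[NR_WORKERS];
	long i;

	/* "Send the IPI" to each worker ... */
	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);

	/* ... and, as with the wait argument of 1, block until every callback ran. */
	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(threads[i], NULL);

	return 0;
}

Compile with cc -pthread; the join loop plays the role of the wait flag.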
@@ -28,7 +36,7 @@ void flush_icache_all(void)
void flush_icache_mm(struct mm_struct *mm, bool local)
{
unsigned int cpu;
- cpumask_t others, hmask, *mask;
+ cpumask_t others, *mask;
preempt_disable();
@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
*/
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local) {
- riscv_cpuid_to_hartid_mask(&others, &hmask);
- sbi_remote_fence_i(hmask.bits);
- } else {
+ if (mm == current->active_mm && local) {
/*
* It's assumed that at least one strongly ordered operation is
* performed on this hart between setting a hart's cpumask bit
@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
* with flush_icache_deferred().
*/
smp_mb();
+ } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+ cpumask_t hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+ sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+ } else {
+ on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
}
preempt_enable();
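The riscv_cpuid_to_hartid_mask() step in the SBI branch is needed because Linux logical CPU numbers and RISC-V hart IDs are separate numberings: the SBI call expects a bitmap indexed by hart ID, while `others` is indexed by logical CPU. A small, self-contained sketch of that kind of remapping (the mapping table here is hypothetical; the kernel derives the real one from the devicetree):

/* Illustrative sketch of a cpuid-mask to hartid-mask conversion; not kernel code. */
#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical logical-cpu -> hartid table; hart IDs need not be contiguous. */
static const unsigned int cpuid_to_hartid[NR_CPUS] = { 3, 0, 1, 2 };

static unsigned long cpuid_mask_to_hartid_mask(unsigned long cpu_mask)
{
	unsigned long hartid_mask = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_mask & (1UL << cpu))
			hartid_mask |= 1UL << cpuid_to_hartid[cpu];
	return hartid_mask;
}

int main(void)
{
	/* CPUs 1 and 2 need a remote fence.i ... */
	unsigned long cpu_mask = (1UL << 1) | (1UL << 2);

	/* ... which the firmware must be told about as harts 0 and 1. */
	printf("hartid mask: 0x%lx\n", cpuid_mask_to_hartid_mask(cpu_mask));
	return 0;
}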