@@ -231,6 +231,13 @@ config RISCV_DMA_NONCOHERENT
select ARCH_HAS_SETUP_DMA_OPS
select DMA_DIRECT_REMAP
+config RISCV_NONSTANDARD_CACHE_OPS
+ bool
+ depends on RISCV_DMA_NONCOHERENT
+ help
+ This enables function pointer support, allowing non-standard noncoherent
+ systems to supply their own cache management operations.
+
config AS_HAS_INSN
def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)
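RISCV_NONSTANDARD_CACHE_OPS is a promptless bool, so it cannot be enabled from
menuconfig; a platform or erratum entry that provides its own cache management
is expected to select it. A hypothetical entry (the MY_SOC name is invented for
illustration) could look like:

  config ERRATA_MY_SOC_CMO
          bool "MY_SOC non-standard cache management operations"
          depends on RISCV_DMA_NONCOHERENT
          select RISCV_NONSTANDARD_CACHE_OPS
          default y
          help
            Use the MY_SOC vendor cache controller for DMA cache maintenance
            instead of the standard Zicbom instructions.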
new file mode 100644
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#ifndef __ASM_DMA_NONCOHERENT_H
+#define __ASM_DMA_NONCOHERENT_H
+
+#include <linux/dma-direct.h>
+
+/**
+ * struct riscv_nonstd_cache_ops - Structure for non-standard CMO function pointers
+ *
+ * @wback: Function pointer for cache writeback
+ * @inv: Function pointer for cache invalidation
+ * @wback_inv: Function pointer for flushing the cache (writeback + invalidation)
+ */
+struct riscv_nonstd_cache_ops {
+ void (*wback)(phys_addr_t paddr, size_t size);
+ void (*inv)(phys_addr_t paddr, size_t size);
+ void (*wback_inv)(phys_addr_t paddr, size_t size);
+};
+
+extern struct riscv_nonstd_cache_ops noncoherent_cache_ops;
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops);
+
+#endif /* __ASM_DMA_NONCOHERENT_H */
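To make the contract concrete, here is a minimal sketch of how a platform with
a vendor-specific (non-Zicbom) cache controller might implement the three
hooks. Every my_soc_* name, register offset, and command value below is
invented for illustration; a real driver would follow its controller's actual
programming model and would map my_soc_base during probe:

  #include <linux/init.h>
  #include <linux/io.h>
  #include <linux/kernel.h>
  #include <asm/dma-noncoherent.h>

  #define MY_SOC_LINE_SIZE        64      /* hypothetical cache line size */
  #define MY_SOC_REG_ADDR         0x00    /* line address register */
  #define MY_SOC_REG_CMD          0x04    /* command register */
  #define MY_SOC_CMD_WB           BIT(0)  /* write back one line */
  #define MY_SOC_CMD_INV          BIT(1)  /* invalidate one line */

  static void __iomem *my_soc_base;       /* mapped during probe (not shown) */

  /* Issue one command per cache line covering [paddr, paddr + size). */
  static void my_soc_cmo(u32 cmd, phys_addr_t paddr, size_t size)
  {
          phys_addr_t end = paddr + size;
          phys_addr_t line = ALIGN_DOWN(paddr, MY_SOC_LINE_SIZE);

          for (; line < end; line += MY_SOC_LINE_SIZE) {
                  writel(lower_32_bits(line), my_soc_base + MY_SOC_REG_ADDR);
                  writel(cmd, my_soc_base + MY_SOC_REG_CMD);
          }
  }

  static void my_soc_wback(phys_addr_t paddr, size_t size)
  {
          my_soc_cmo(MY_SOC_CMD_WB, paddr, size);
  }

  static void my_soc_inv(phys_addr_t paddr, size_t size)
  {
          my_soc_cmo(MY_SOC_CMD_INV, paddr, size);
  }

  static void my_soc_wback_inv(phys_addr_t paddr, size_t size)
  {
          my_soc_cmo(MY_SOC_CMD_WB | MY_SOC_CMD_INV, paddr, size);
  }

  static const struct riscv_nonstd_cache_ops my_soc_cache_ops = {
          .wback          = my_soc_wback,
          .inv            = my_soc_inv,
          .wback_inv      = my_soc_wback_inv,
  };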
@@ -9,13 +9,26 @@
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
+#include <asm/dma-noncoherent.h>
static bool noncoherent_supported __ro_after_init;
+struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
+ .wback = NULL,
+ .inv = NULL,
+ .wback_inv = NULL,
+};
+
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.wback)) {
+ noncoherent_cache_ops.wback(paddr, size);
+ return;
+ }
+#endif
ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}
@@ -23,6 +36,13 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.inv)) {
+ noncoherent_cache_ops.inv(paddr, size);
+ return;
+ }
+#endif
+
ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}
@@ -30,6 +50,13 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.wback_inv)) {
+ noncoherent_cache_ops.wback_inv(paddr, size);
+ return;
+ }
+#endif
+
ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}
@@ -95,6 +122,13 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
void *flush_addr = page_address(page);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (unlikely(noncoherent_cache_ops.wback_inv)) {
+ noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
+ return;
+ }
+#endif
+
ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}
@@ -120,3 +154,12 @@ void riscv_noncoherent_supported(void)
"Non-coherent DMA support enabled without a block size\n");
noncoherent_supported = true;
}
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
+{
+ if (!ops)
+ return;
+
+ noncoherent_cache_ops = *ops;
+}
+EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
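
Registration itself is a one-liner. Because the structure is copied by value
into noncoherent_cache_ops, which is __ro_after_init, registration has to
happen during boot, before the region is sealed read-only and before the first
non-coherent DMA mapping is created. Continuing the hypothetical my_soc
example from above:

  static int __init my_soc_cache_init(void)
  {
          /* my_soc_base must already be mapped at this point. */
          riscv_noncoherent_register_cache_ops(&my_soc_cache_ops);
          return 0;
  }
  early_initcall(my_soc_cache_init);

On standard systems the cost of this indirection is small: when no ops are
registered, each helper falls through to ALT_CMO_OP() after a single
unlikely() pointer test, and with CONFIG_RISCV_NONSTANDARD_CACHE_OPS=n the
test is compiled out entirely.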