@@ -80,54 +80,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#endif
/*
- * Unordered I/O memory access primitives. These are even more relaxed than
- * the relaxed versions, as they don't even order accesses between successive
- * operations to the I/O regions.
- */
-#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
-#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-
-#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
-#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
-#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
-
-#ifdef CONFIG_64BIT
-#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
-#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
-#endif
-
-/*
- * Relaxed I/O memory access primitives. These follow the Device memory
- * ordering rules but do not guarantee any ordering relative to Normal memory
- * accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses to the same peripheral. Since the
- * platform specification defines that all I/O regions are strongly ordered on
- * channel 0, no explicit fences are required to enforce this ordering.
- */
-/* FIXME: These are now the same as asm-generic */
-#define __io_rbr() do {} while (0)
-#define __io_rar() do {} while (0)
-#define __io_rbw() do {} while (0)
-#define __io_raw() do {} while (0)
-
-#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
-#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
-#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
-
-#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
-#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
-#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
-
-#ifdef CONFIG_64BIT
-#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
-#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
-#endif
-
-/*
- * I/O memory access primitives. Reads are ordered relative to any following
- * Normal memory read and delay() loop. Writes are ordered relative to any
- * prior Normal memory write. The memory barriers here are necessary as RISC-V
+ * I/O barriers. The memory barriers here are necessary as RISC-V
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
@@ -135,17 +88,4 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
#define __io_aw() mmiowb_set_pending()
-#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
-#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
-#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
-
-#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
-#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
-#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
-
-#ifdef CONFIG_64BIT
-#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
-#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
-#endif
-
#endif /* _ASM_RISCV_MMIO_H */
The asm-generic implementation is functionally identical to the riscv
version.

Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
---
 arch/riscv/include/asm/mmio.h | 62 +----------------------------------
 1 file changed, 1 insertion(+), 61 deletions(-)
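
For reference, this is roughly how the generic accessors in
include/asm-generic/io.h expand once the __io_br()/__io_ar()/__io_bw()/
__io_aw() overrides kept above are in place. This is a paraphrased sketch,
not the verbatim generic code: the #ifndef guards and optional MMIO logging
hooks are omitted, and details may differ slightly between kernel versions.

static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	__io_br();	/* no-op on riscv */
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);	/* "fence i,ir" on riscv */
	return val;
}

static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();	/* "fence w,o" on riscv */
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();	/* mmiowb_set_pending() on riscv */
}

static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	/* same as the removed readl_cpu()/readl_relaxed() pair: no fences */
	return __le32_to_cpu((__le32 __force)__raw_readl(addr));
}

asm-generic/io.h only supplies these when the architecture has not already
defined them, so with the riscv copies removed the generic versions are
picked up and expand to the same sequence of __raw_*() access plus the
riscv barrier macros as the deleted definitions.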