@@ -19,6 +19,7 @@ extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
extern void iounmap(const void __iomem *addr);
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
+#define ioremap_cache(phy, sz) ioremap(phy, sz)
#define ioremap_wc(phy, sz) ioremap(phy, sz)
#define ioremap_wt(phy, sz) ioremap(phy, sz)
@@ -23,6 +23,8 @@
#ifdef __KERNEL__
+#define ARCH_HAS_IOREMAP_CACHE
+
#include <linux/types.h>
#include <linux/blk_types.h>
#include <asm/byteorder.h>
@@ -21,6 +21,8 @@
#ifdef __KERNEL__
+#define ARCH_HAS_IOREMAP_CACHE
+
#include <linux/types.h>
#include <linux/blk_types.h>
@@ -171,6 +173,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_cache ioremap_cache
#define iounmap __iounmap
/*
@@ -297,6 +297,7 @@ extern void __iounmap(void __iomem *addr);
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
+#define ioremap_cache ioremap_nocache
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)
@@ -18,6 +18,7 @@
#ifdef __KERNEL__
#define ARCH_HAS_IOREMAP_WT
+#define ARCH_HAS_IOREMAP_CACHE
#include <linux/types.h>
#include <asm/virtconvert.h>
@@ -277,6 +278,11 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr, unsigned l
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
+static inline void __iomem *ioremap_cache(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
+}
+
#define ioremap_wc ioremap_nocache
extern void iounmap(void volatile __iomem *addr);
@@ -431,11 +431,6 @@ extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size
#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size)
extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
#define early_memunmap(addr, size) early_iounmap(addr, size)
-static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
-{
- return ioremap(phys_addr, size);
-}
-
/*
* String version of IO memory access ops:
@@ -67,6 +67,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
extern void iounmap(volatile void __iomem *addr);
#define ioremap_nocache(off,size) ioremap(off,size)
+#define ioremap_cache ioremap_nocache
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
@@ -21,6 +21,7 @@
#ifdef __KERNEL__
#define ARCH_HAS_IOREMAP_WT
+#define ARCH_HAS_IOREMAP_CACHE
#include <linux/compiler.h>
#include <asm/raw_io.h>
@@ -478,6 +479,12 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
+static inline void __iomem *ioremap_cache(unsigned long physaddr,
+ unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
+}
+
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
__builtin_memset((void __force *) addr, val, count);
@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#define ARCH_HAS_IOREMAP_WT
+#define ARCH_HAS_IOREMAP_CACHE
#include <asm/virtconvert.h>
#include <asm-generic/iomap.h>
@@ -163,6 +164,10 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
+static inline void *ioremap_cache(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
+}
#define iounmap(addr) do { } while(0)
@@ -1,6 +1,8 @@
#ifndef _ASM_METAG_IO_H
#define _ASM_METAG_IO_H
+#define ARCH_HAS_IOREMAP_CACHE
+
#include <linux/types.h>
#include <asm/pgtable-bits.h>
@@ -157,6 +159,9 @@ extern void __iounmap(void __iomem *addr);
#define ioremap_cached(offset, size) \
__ioremap((offset), (size), _PAGE_CACHEABLE)
+#define ioremap_cache(offset, size) \
+ __ioremap((offset), (size), _PAGE_CACHEABLE)
+
#define ioremap_wc(offset, size) \
__ioremap((offset), (size), _PAGE_WR_COMBINE)
@@ -43,6 +43,7 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#define ioremap_fullcache(addr, size) ioremap((addr), (size))
#define ioremap_wc(addr, size) ioremap((addr), (size))
#define ioremap_wt(addr, size) ioremap((addr), (size))
+#define ioremap_cache(addr, size) ioremap((addr), (size))
#endif /* CONFIG_MMU */
@@ -12,6 +12,8 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H
+#define ARCH_HAS_IOREMAP_CACHE
+
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -232,8 +234,10 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
* address is not guaranteed to be usable directly as a virtual
* address.
*/
-#define ioremap(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_UNCACHED)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+{
+ return __ioremap_mode(offset, size, _CACHE_UNCACHED);
+}
/*
* ioremap_nocache - map bus memory into CPU space
@@ -254,8 +258,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
* It's useful if some control registers are in such an area and
* write combining or read caching is not desirable:
*/
-#define ioremap_nocache(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_nocache ioremap
/*
* ioremap_cachable - map bus memory into CPU space
@@ -272,8 +275,14 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
* the CPU. Also enables full write-combining. Useful for some
* memory-like regions on I/O busses.
*/
+extern unsigned long _page_cachable_default;
#define ioremap_cachable(offset, size) \
__ioremap_mode((offset), (size), _page_cachable_default)
+static inline void __iomem *ioremap_cache(resource_size_t offset,
+ unsigned long size)
+{
+ return ioremap_cachable(offset, size);
+}
/*
* These two are MIPS specific ioremap variant. ioremap_cacheable_cow
@@ -283,6 +283,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset, unsigned long
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
+#define ioremap_cache ioremap_nocache
static inline void iounmap(void __iomem *addr)
{
@@ -47,6 +47,7 @@ static inline void iounmap(void __iomem *addr)
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
+#define ioremap_cache ioremap_nocache
/* Pages to physical address... */
#define page_to_phys(page) virt_to_phys(page_to_virt(page))
@@ -30,6 +30,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
+#define ioremap_cache ioremap_nocache
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
@@ -128,6 +128,7 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst,
*/
void __iomem *ioremap(unsigned long offset, unsigned long size);
#define ioremap_nocache(X,Y) ioremap((X),(Y))
+#define ioremap_cache(X,Y) ioremap((X),(Y))
#define ioremap_wc(X,Y) ioremap((X),(Y))
#define ioremap_wt(X,Y) ioremap((X),(Y))
void iounmap(volatile void __iomem *addr);
@@ -401,6 +401,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
}
#define ioremap_nocache ioremap
+#define ioremap_cache ioremap
#define ioremap_wc ioremap
#define ioremap_wt ioremap
@@ -53,6 +53,7 @@ extern void iounmap(volatile void __iomem *addr);
#endif
#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
+#define ioremap_cache(physaddr, size) ioremap(physaddr, size)
#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
#define ioremap_wt(physaddr, size) ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
@@ -36,6 +36,7 @@
#define ARCH_HAS_IOREMAP_WC
#define ARCH_HAS_IOREMAP_WT
+#define ARCH_HAS_IOREMAP_CACHE
#include <linux/string.h>
#include <linux/compiler.h>
@@ -12,6 +12,9 @@
#define _XTENSA_IO_H
#ifdef __KERNEL__
+
+#define ARCH_HAS_IOREMAP_CACHE
+
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/vectors.h>
@@ -793,6 +793,14 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_cache
+#define ioremap_cache ioremap_cache
+static inline void __iomem *ioremap_cache(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef iounmap
#define iounmap iounmap
@@ -70,6 +70,10 @@ extern void ioport_unmap(void __iomem *);
#define ioremap_wt ioremap_nocache
#endif
+#ifndef ARCH_HAS_IOREMAP_CACHE
+#define ioremap_cache ioremap_nocache
+#endif
+
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
Similar to ioremap_wc(), let architecture implementations optionally
provide ioremap_cache(). As is, current ioremap_cache() users have
architecture dependencies that prevent them from compiling on archs
without ioremap_cache(). In some cases, architectures that do have a
cached ioremap() capability expose it under an identifier other than
"ioremap_cache". Allow drivers to compile with ioremap_cache() support
and fall back to a safe, uncached ioremap otherwise.

Cc: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/arc/include/asm/io.h        |    1 +
 arch/arm/include/asm/io.h        |    2 ++
 arch/arm64/include/asm/io.h      |    3 +++
 arch/avr32/include/asm/io.h      |    1 +
 arch/frv/include/asm/io.h        |    6 ++++++
 arch/ia64/include/asm/io.h       |    5 -----
 arch/m32r/include/asm/io.h       |    1 +
 arch/m68k/include/asm/io_mm.h    |    7 +++++++
 arch/m68k/include/asm/io_no.h    |    5 +++++
 arch/metag/include/asm/io.h      |    5 +++++
 arch/microblaze/include/asm/io.h |    1 +
 arch/mips/include/asm/io.h       |   17 +++++++++++++----
 arch/mn10300/include/asm/io.h    |    1 +
 arch/nios2/include/asm/io.h      |    1 +
 arch/s390/include/asm/io.h       |    1 +
 arch/sparc/include/asm/io_32.h   |    1 +
 arch/sparc/include/asm/io_64.h   |    1 +
 arch/tile/include/asm/io.h       |    1 +
 arch/x86/include/asm/io.h        |    1 +
 arch/xtensa/include/asm/io.h     |    3 +++
 include/asm-generic/io.h         |    8 ++++++++
 include/asm-generic/iomap.h      |    4 ++++
 22 files changed, 67 insertions(+), 9 deletions(-)
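For context, a minimal sketch (hypothetical, not part of this patch) of
what the fallback enables in driver code: the driver can call
ioremap_cache() unconditionally, and on architectures without a cached
ioremap() variant it transparently gets the safe, uncached mapping
instead. The driver name, physical address, and size below are invented
for illustration only.

	#include <linux/io.h>		/* ioremap_cache(), iounmap() */
	#include <linux/errno.h>

	/* Illustrative values only -- not taken from the patch. */
	#define FOO_TABLE_PHYS	0x40000000UL
	#define FOO_TABLE_SIZE	0x1000UL

	static void __iomem *foo_table;

	static int foo_map_table(void)
	{
		/*
		 * Compiles on every architecture after this patch;
		 * archs lacking a real cached mapping fall back to an
		 * uncached one, which is still correct (just slower).
		 */
		foo_table = ioremap_cache(FOO_TABLE_PHYS, FOO_TABLE_SIZE);
		if (!foo_table)
			return -ENOMEM;
		return 0;
	}

	static void foo_unmap_table(void)
	{
		iounmap(foo_table);
		foo_table = NULL;
	}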