Message ID | 20230515090848.833045-13-bhe@redhat.com (mailing list archive)
---|---
State | New
Series | mm: ioremap: Convert architectures to take GENERIC_IOREMAP way
On Mon, May 15, 2023 at 05:08:43PM +0800, Baoquan He wrote:
> By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
> generic_iounmap(), and their generic wrappers ioremap_prot(), ioremap()
> and iounmap() are all visible and available to the arch. The arch needs to
> provide wrapper functions to override the generic versions if there is
> arch-specific handling in its ioremap_prot(), ioremap() or iounmap().
> This change simplifies the implementation by removing code duplicated
> with generic_ioremap_prot() and generic_iounmap(), and keeps functionality
> equivalent to before.
>
> Here, add wrapper functions ioremap_prot(), ioremap() and iounmap() to
> cover xtensa's special handling in ioremap() and iounmap().
>
> Signed-off-by: Baoquan He <bhe@redhat.com>
> Cc: Chris Zankel <chris@zankel.net>
> Cc: Max Filippov <jcmvbkbc@gmail.com>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

> ---
>  arch/xtensa/Kconfig          |  1 +
>  arch/xtensa/include/asm/io.h | 32 ++++++++------------
>  arch/xtensa/mm/ioremap.c     | 58 +++++++++---------------------------
>  3 files changed, 27 insertions(+), 64 deletions(-)
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 3c6e5471f025..474cbbff3e6c 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -29,6 +29,7 @@ config XTENSA
 	select GENERIC_LIB_UCMPDI2
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select GENERIC_IOREMAP if MMU
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index a5b707e1c0f4..934e58399c8c 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -16,6 +16,7 @@
 #include <asm/vectors.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
+#include <linux/pgtable.h>
 
 #include <linux/types.h>
 
@@ -24,22 +25,24 @@
 #define PCI_IOBASE	((void __iomem *)XCHAL_KIO_BYPASS_VADDR)
 
 #ifdef CONFIG_MMU
-
-void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
-void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
-void xtensa_iounmap(volatile void __iomem *addr);
-
 /*
- * Return the virtual address for the specified bus memory.
+ * I/O memory mapping functions.
  */
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   unsigned long prot);
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
+
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	if (offset >= XCHAL_KIO_PADDR
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
 	else
-		return xtensa_ioremap_nocache(offset, size);
+		return ioremap_prot(offset, size,
+			pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 }
+#define ioremap ioremap
 
 static inline void __iomem *ioremap_cache(unsigned long offset,
 		unsigned long size)
@@ -48,21 +51,10 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
 	else
-		return xtensa_ioremap_cache(offset, size);
-}
-#define ioremap_cache ioremap_cache
+		return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
 
-static inline void iounmap(volatile void __iomem *addr)
-{
-	unsigned long va = (unsigned long) addr;
-
-	if (!(va >= XCHAL_KIO_CACHED_VADDR &&
-	      va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
-	    !(va >= XCHAL_KIO_BYPASS_VADDR &&
-	      va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
-		xtensa_iounmap(addr);
 }
-
+#define ioremap_cache ioremap_cache
 #endif /* CONFIG_MMU */
 
 #include <asm-generic/io.h>
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index a400188c16b9..8ca660b7ab49 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -6,60 +6,30 @@
  */
 
 #include <linux/io.h>
-#include <linux/vmalloc.h>
 #include <linux/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
-				    pgprot_t prot)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   unsigned long prot)
 {
-	unsigned long offset = paddr & ~PAGE_MASK;
-	unsigned long pfn = __phys_to_pfn(paddr);
-	struct vm_struct *area;
-	unsigned long vaddr;
-	int err;
-
-	paddr &= PAGE_MASK;
-
+	unsigned long pfn = __phys_to_pfn((phys_addr));
 	WARN_ON(pfn_valid(pfn));
 
-	size = PAGE_ALIGN(offset + size);
-
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-
-	vaddr = (unsigned long)area->addr;
-	area->phys_addr = paddr;
-
-	err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
-
-	if (err) {
-		vunmap((void *)vaddr);
-		return NULL;
-	}
-
-	flush_cache_vmap(vaddr, vaddr + size);
-	return (void __iomem *)(offset + vaddr);
-}
-
-void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
-{
-	return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
+	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
 }
-EXPORT_SYMBOL(xtensa_ioremap_nocache);
+EXPORT_SYMBOL(ioremap_prot);
 
-void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
+void iounmap(volatile void __iomem *addr)
 {
-	return xtensa_ioremap(addr, size, PAGE_KERNEL);
-}
-EXPORT_SYMBOL(xtensa_ioremap_cache);
+	unsigned long va = (unsigned long) addr;
 
-void xtensa_iounmap(volatile void __iomem *io_addr)
-{
-	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+	if ((va >= XCHAL_KIO_CACHED_VADDR &&
+	     va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) ||
+	    (va >= XCHAL_KIO_BYPASS_VADDR &&
+	     va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+		return;
 
-	vunmap(addr);
+	generic_iounmap(addr);
 }
-EXPORT_SYMBOL(xtensa_iounmap);
+EXPORT_SYMBOL(iounmap);
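For readers unfamiliar with the GENERIC_IOREMAP plumbing the patch relies on: an architecture overrides a generic helper by defining the macro to its own name before including asm-generic/io.h, which compiles out the generic fallback. The following is a simplified, illustrative sketch of that convention only; it is not the literal contents of asm-generic/io.h or mm/ioremap.c.

/*
 * Simplified sketch of the define-to-override convention behind
 * GENERIC_IOREMAP; the real asm-generic/io.h and mm/ioremap.c are
 * more involved.
 */

/* Architecture header (e.g. arch/xtensa/include/asm/io.h): */
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot);
#define ioremap_prot ioremap_prot	/* signal "arch provides its own" */

#include <asm-generic/io.h>

/* Generic side (sketched): the fallback is only built when the arch
 * did not define the macro above. */
#ifndef ioremap_prot
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
#endif

This is why the xtensa header above adds #define ioremap_prot ioremap_prot, #define ioremap ioremap and #define iounmap iounmap next to its own definitions.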
By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrappers ioremap_prot(), ioremap() and
iounmap() are all visible and available to the arch. The arch needs to provide
wrapper functions to override the generic versions if there is arch-specific
handling in its ioremap_prot(), ioremap() or iounmap(). This change simplifies
the implementation by removing code duplicated with generic_ioremap_prot() and
generic_iounmap(), and keeps functionality equivalent to before.

Here, add wrapper functions ioremap_prot(), ioremap() and iounmap() to cover
xtensa's special handling in ioremap() and iounmap().

Signed-off-by: Baoquan He <bhe@redhat.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/Kconfig          |  1 +
 arch/xtensa/include/asm/io.h | 32 ++++++++------------
 arch/xtensa/mm/ioremap.c     | 58 +++++++++---------------------------
 3 files changed, 27 insertions(+), 64 deletions(-)
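To make the "equivalent functionality" claim concrete, here is a minimal, hypothetical driver-side usage sketch. The device base address, size and register offset are invented for illustration; only standard ioremap()/iounmap()/readl() usage and the KIO-vs-generic behaviour described in the patch are assumed.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical MMIO window; a real driver gets this from DT or platform
 * resources. */
#define EXAMPLE_DEV_PHYS	0xfd050000UL
#define EXAMPLE_DEV_SIZE	0x1000UL
#define EXAMPLE_REG_ID		0x00	/* made-up ID register offset */

static int example_probe(void)
{
	void __iomem *regs;
	u32 id;

	/* On xtensa this either resolves to the fixed KIO bypass mapping
	 * or falls through to ioremap_prot() -> generic_ioremap_prot();
	 * the caller cannot tell the difference. */
	regs = ioremap(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE);
	if (!regs)
		return -ENOMEM;

	id = readl(regs + EXAMPLE_REG_ID);
	pr_info("example device id: %#x\n", id);

	/* iounmap() returns early for KIO-range addresses and calls
	 * generic_iounmap() otherwise, matching the behaviour of the old
	 * inline iounmap()/xtensa_iounmap() pair. */
	iounmap(regs);
	return 0;
}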