| Message ID | 20230515090848.833045-9-bhe@redhat.com (mailing list archive) |
|---|---|
| State | New |
| Series | mm: ioremap: Convert architectures to take GENERIC_IOREMAP way |
On Mon, May 15, 2023 at 05:08:39PM +0800, Baoquan He wrote:
> By taking GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
> generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap()
> and iounmap() are all visible and available to arch. Arch needs to
> provide wrapper functions to override the generic versions if there's
> arch specific handling in its ioremap_prot(), ioremap() or iounmap().
> This change will simplify implementation by removing duplicated codes
> with generic_ioremap_prot() and generic_iounmap(), and has the equivalent
> functionality as before.
>
> Here, add wrapper functions ioremap_prot() and iounmap() for ia64's
> special operation when ioremap() and iounmap().
>
> Signed-off-by: Baoquan He <bhe@redhat.com>
> Cc: linux-ia64@vger.kernel.org

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

> ---
>  arch/ia64/Kconfig          |  1 +
>  arch/ia64/include/asm/io.h | 13 +++++-------
>  arch/ia64/mm/ioremap.c     | 41 ++++++--------------------------------
>  3 files changed, 12 insertions(+), 43 deletions(-)
>
> diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
> index 21fa63ce5ffc..4f970b6d8032 100644
> --- a/arch/ia64/Kconfig
> +++ b/arch/ia64/Kconfig
> @@ -46,6 +46,7 @@ config IA64
>  	select GENERIC_IRQ_LEGACY
>  	select ARCH_HAVE_NMI_SAFE_CMPXCHG
>  	select GENERIC_IOMAP
> +	select GENERIC_IOREMAP
>  	select GENERIC_SMP_IDLE_THREAD
>  	select ARCH_TASK_STRUCT_ON_STACK
>  	select ARCH_TASK_STRUCT_ALLOCATOR
> diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
> index 83a492c8d298..eedc0afa8cad 100644
> --- a/arch/ia64/include/asm/io.h
> +++ b/arch/ia64/include/asm/io.h
> @@ -243,15 +243,12 @@ static inline void outsl(unsigned long port, const void *src,
>
>  # ifdef __KERNEL__
>
> -extern void __iomem * ioremap(unsigned long offset, unsigned long size);
> +#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
> +
>  extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
> -extern void iounmap (volatile void __iomem *addr);
> -static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
> -{
> -	return ioremap(phys_addr, size);
> -}
> -#define ioremap ioremap
> -#define ioremap_cache ioremap_cache
> +
> +#define ioremap_prot ioremap_prot
> +#define ioremap_cache ioremap
>  #define ioremap_uc ioremap_uc
>  #define iounmap iounmap
>
> diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
> index 92b81bc91397..711b6abc822e 100644
> --- a/arch/ia64/mm/ioremap.c
> +++ b/arch/ia64/mm/ioremap.c
> @@ -29,13 +29,9 @@ early_ioremap (unsigned long phys_addr, unsigned long size)
>  	return __ioremap_uc(phys_addr);
>  }
>
> -void __iomem *
> -ioremap (unsigned long phys_addr, unsigned long size)
> +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
> +			   unsigned long flags)
>  {
> -	void __iomem *addr;
> -	struct vm_struct *area;
> -	unsigned long offset;
> -	pgprot_t prot;
>  	u64 attr;
>  	unsigned long gran_base, gran_size;
>  	unsigned long page_base;
> @@ -68,36 +64,12 @@ ioremap (unsigned long phys_addr, unsigned long size)
>  	 */
>  	page_base = phys_addr & PAGE_MASK;
>  	size = PAGE_ALIGN(phys_addr + size) - page_base;
> -	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
> -		prot = PAGE_KERNEL;
> -
> -		/*
> -		 * Mappings have to be page-aligned
> -		 */
> -		offset = phys_addr & ~PAGE_MASK;
> -		phys_addr &= PAGE_MASK;
> -
> -		/*
> -		 * Ok, go for it..
> -		 */
> -		area = get_vm_area(size, VM_IOREMAP);
> -		if (!area)
> -			return NULL;
> -
> -		area->phys_addr = phys_addr;
> -		addr = (void __iomem *) area->addr;
> -		if (ioremap_page_range((unsigned long) addr,
> -			       (unsigned long) addr + size, phys_addr, prot)) {
> -			vunmap((void __force *) addr);
> -			return NULL;
> -		}
> -
> -		return (void __iomem *) (offset + (char __iomem *)addr);
> -	}
> +	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB)
> +		return generic_ioremap_prot(phys_addr, size, __pgprot(flags));
>
>  	return __ioremap_uc(phys_addr);
>  }
> -EXPORT_SYMBOL(ioremap);
> +EXPORT_SYMBOL(ioremap_prot);
>
>  void __iomem *
>  ioremap_uc(unsigned long phys_addr, unsigned long size)
> @@ -114,8 +86,7 @@ early_iounmap (volatile void __iomem *addr, unsigned long size)
>  {
>  }
>
> -void
> -iounmap (volatile void __iomem *addr)
> +void iounmap(volatile void __iomem *addr)
>  {
>  	if (REGION_NUMBER(addr) == RGN_GATE)
>  		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
> --
> 2.34.1
>
>
> +#define ioremap_prot ioremap_prot
> +#define ioremap_cache ioremap
>  #define ioremap_uc ioremap_uc
>  #define iounmap iounmap

Same comment about the placement here, I'm not going to repeat it if
it shows up in more patches.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>
On 05/16/23 at 11:33pm, Christoph Hellwig wrote:
> > +#define ioremap_prot ioremap_prot
> > +#define ioremap_cache ioremap
> >  #define ioremap_uc ioremap_uc
> >  #define iounmap iounmap
>
> Same comment about the placement here, I'm not going to repeat it if
> it shows up in more patches.

Sure, will check all of them and change, thanks.

>
> Otherwise looks good:
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
>
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 21fa63ce5ffc..4f970b6d8032 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -46,6 +46,7 @@ config IA64
 	select GENERIC_IRQ_LEGACY
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
+	select GENERIC_IOREMAP
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_TASK_STRUCT_ON_STACK
 	select ARCH_TASK_STRUCT_ALLOCATOR
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 83a492c8d298..eedc0afa8cad 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -243,15 +243,12 @@ static inline void outsl(unsigned long port, const void *src,
 
 # ifdef __KERNEL__
 
-extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
+
 extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
-extern void iounmap (volatile void __iomem *addr);
-static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
-{
-	return ioremap(phys_addr, size);
-}
-#define ioremap ioremap
-#define ioremap_cache ioremap_cache
+
+#define ioremap_prot ioremap_prot
+#define ioremap_cache ioremap
 #define ioremap_uc ioremap_uc
 #define iounmap iounmap
 
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 92b81bc91397..711b6abc822e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -29,13 +29,9 @@ early_ioremap (unsigned long phys_addr, unsigned long size)
 	return __ioremap_uc(phys_addr);
 }
 
-void __iomem *
-ioremap (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   unsigned long flags)
 {
-	void __iomem *addr;
-	struct vm_struct *area;
-	unsigned long offset;
-	pgprot_t prot;
 	u64 attr;
 	unsigned long gran_base, gran_size;
 	unsigned long page_base;
@@ -68,36 +64,12 @@ ioremap (unsigned long phys_addr, unsigned long size)
 	 */
 	page_base = phys_addr & PAGE_MASK;
 	size = PAGE_ALIGN(phys_addr + size) - page_base;
-	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
-		prot = PAGE_KERNEL;
-
-		/*
-		 * Mappings have to be page-aligned
-		 */
-		offset = phys_addr & ~PAGE_MASK;
-		phys_addr &= PAGE_MASK;
-
-		/*
-		 * Ok, go for it..
-		 */
-		area = get_vm_area(size, VM_IOREMAP);
-		if (!area)
-			return NULL;
-
-		area->phys_addr = phys_addr;
-		addr = (void __iomem *) area->addr;
-		if (ioremap_page_range((unsigned long) addr,
-			       (unsigned long) addr + size, phys_addr, prot)) {
-			vunmap((void __force *) addr);
-			return NULL;
-		}
-
-		return (void __iomem *) (offset + (char __iomem *)addr);
-	}
+	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB)
+		return generic_ioremap_prot(phys_addr, size, __pgprot(flags));
 
 	return __ioremap_uc(phys_addr);
 }
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_prot);
 
 void __iomem *
 ioremap_uc(unsigned long phys_addr, unsigned long size)
@@ -114,8 +86,7 @@ early_iounmap (volatile void __iomem *addr, unsigned long size)
 {
 }
 
-void
-iounmap (volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	if (REGION_NUMBER(addr) == RGN_GATE)
 		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrappers ioremap_prot(), ioremap()
and iounmap() are all visible and available to the arch. The arch needs to
provide wrapper functions to override the generic versions if there's
arch-specific handling in its ioremap_prot(), ioremap() or iounmap().
This change simplifies the implementation by removing code duplicated
with generic_ioremap_prot() and generic_iounmap(), and has equivalent
functionality to before.

Here, add wrapper functions ioremap_prot() and iounmap() for ia64's
special handling in ioremap() and iounmap().

Signed-off-by: Baoquan He <bhe@redhat.com>
Cc: linux-ia64@vger.kernel.org
---
 arch/ia64/Kconfig          |  1 +
 arch/ia64/include/asm/io.h | 13 +++++-------
 arch/ia64/mm/ioremap.c     | 41 ++++++--------------------------------
 3 files changed, 12 insertions(+), 43 deletions(-)
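[Editor's note] For readers skimming the thread, the GENERIC_IOREMAP split the
series relies on can be summarised with a short sketch. This is a simplified
illustration, not verbatim kernel code: the generic ioremap() wrapper below only
approximates what include/asm-generic/io.h provides under CONFIG_GENERIC_IOREMAP,
and the override mirrors the shape of the ia64 ioremap_prot() in the patch above.

/*
 * Simplified sketch of the GENERIC_IOREMAP pattern (kernel-internal code,
 * compiles only in-tree; for illustration, not the exact upstream source).
 */

/*
 * Roughly what the generic layer supplies once an arch selects
 * GENERIC_IOREMAP: plain ioremap() is ioremap_prot() with the
 * arch-provided _PAGE_IOREMAP protection bits (ia64 defines it as
 * pgprot_val(PAGE_KERNEL) in this patch).
 */
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}

/*
 * An arch that needs extra logic defines ioremap_prot in its asm/io.h
 * ("#define ioremap_prot ioremap_prot") and implements it itself,
 * falling back to generic_ioremap_prot() for the common case -- the
 * shape of the ia64 version above; the arch-specific checks here are
 * placeholders.
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long flags)
{
	/* arch-specific filtering (e.g. ia64's EFI memory-attribute check) */
	return generic_ioremap_prot(phys_addr, size, __pgprot(flags));
}

The net effect, visible in the diff, is that only the genuinely arch-specific
logic (for ia64, the EFI attribute check and the uncached fallback) stays in
arch code, while the get_vm_area()/ioremap_page_range() bookkeeping lives once
in generic_ioremap_prot().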