[v3,05/11] arc: mm: Convert to GENERIC_IOREMAP

Message ID 20221009103114.149036-6-bhe@redhat.com (mailing list archive)
State New
Series mm: ioremap: Convert architectures to take GENERIC_IOREMAP way

Commit Message

Baoquan He Oct. 9, 2022, 10:31 a.m. UTC
By taking the GENERIC_IOREMAP method, the generic ioremap_prot() and
iounmap() become visible and available to the arch. The arch only needs
to provide an implementation of arch_ioremap() or arch_iounmap() if
arch-specific handling is needed in its ioremap() or iounmap(). This
change simplifies the implementation by removing code duplicated with
the generic ioremap() and iounmap(), and keeps the functionality
equivalent to before.
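
For context, here is a minimal sketch of the generic helper this hook
scheme plugs into. It paraphrases the mm/ioremap.c body from this series
rather than quoting it verbatim, so treat the details as illustrative:
arch_ioremap() may return a ready-made cookie to short-circuit the
generic path, an IOMEM_ERR_PTR() to fail the call, or NULL to let the
generic mapping proceed.

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot_val)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;
	void __iomem *base;

	/* Disallow wrap-around or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Let the arch fix up paddr/prot, short-circuit, or veto */
	base = arch_ioremap(&phys_addr, size, &prot_val);
	if (IS_ERR(base))
		return NULL;
	else if (base)
		return base;

	/* Mappings have to be page-aligned */
	offset = phys_addr & (~PAGE_MASK);
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       __pgprot(prot_val))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}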

Here, add the hooks arch_ioremap() and arch_iounmap() for arc's special
handling in ioremap_prot() and iounmap(). Meanwhile, define and
implement arc's own ioremap(), because arc needs special handling in
ioremap() beyond the standard ioremap().
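
Concretely, with the duplicated checks and MMU-mapping code dropped (see
the diff below), arc's ioremap() reduces to roughly the following. This
is a sketch reconstructed from the diff context, not a verbatim quote of
the resulting file:

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	/*
	 * If the region is h/w uncached, MMU mapping can be elided as an
	 * optimization; the cast to u32 is fine as this region can only
	 * be inside 4GB.
	 */
	if (arc_uncached_addr_space(paddr))
		return (void __iomem *)(u32)paddr;

	/* Otherwise take the generic path, forcing an uncached mapping */
	return ioremap_prot(paddr, size,
			    pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}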

Signed-off-by: Baoquan He <bhe@redhat.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: linux-snps-arc@lists.infradead.org
---
 arch/arc/Kconfig          |  1 +
 arch/arc/include/asm/io.h | 19 +++++++++----
 arch/arc/mm/ioremap.c     | 60 ++++++---------------------------------
 3 files changed, 23 insertions(+), 57 deletions(-)

Comments

Christophe Leroy Oct. 12, 2022, 10:17 a.m. UTC | #1
On 09/10/2022 at 12:31, Baoquan He wrote:
> By taking the GENERIC_IOREMAP method, the generic ioremap_prot() and
> iounmap() become visible and available to the arch. The arch only needs
> to provide an implementation of arch_ioremap() or arch_iounmap() if
> arch-specific handling is needed in its ioremap() or iounmap(). This
> change simplifies the implementation by removing code duplicated with
> the generic ioremap() and iounmap(), and keeps the functionality
> equivalent to before.
> 
> Here, add the hooks arch_ioremap() and arch_iounmap() for arc's special
> handling in ioremap_prot() and iounmap(). Meanwhile, define and
> implement arc's own ioremap(), because arc needs special handling in
> ioremap() beyond the standard ioremap().
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> Cc: Vineet Gupta <vgupta@kernel.org>
> Cc: linux-snps-arc@lists.infradead.org
> ---
>   arch/arc/Kconfig          |  1 +
>   arch/arc/include/asm/io.h | 19 +++++++++----
>   arch/arc/mm/ioremap.c     | 60 ++++++---------------------------------
>   3 files changed, 23 insertions(+), 57 deletions(-)
> 
> diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
> index 9e3653253ef2..a08d2abfaf61 100644
> --- a/arch/arc/Kconfig
> +++ b/arch/arc/Kconfig
> @@ -26,6 +26,7 @@ config ARC
>   	select GENERIC_PENDING_IRQ if SMP
>   	select GENERIC_SCHED_CLOCK
>   	select GENERIC_SMP_IDLE_THREAD
> +	select GENERIC_IOREMAP
>   	select HAVE_ARCH_KGDB
>   	select HAVE_ARCH_TRACEHOOK
>   	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
> diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
> index 8f777d6441a5..41a317567920 100644
> --- a/arch/arc/include/asm/io.h
> +++ b/arch/arc/include/asm/io.h
> @@ -20,9 +20,20 @@
>   #define __iowmb()		do { } while (0)
>   #endif
>   
> -extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
> -extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
> -				  unsigned long flags);
> +/*
> + * I/O memory mapping functions.
> + */
> +
> +void __iomem *
> +arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val);
> +#define arch_ioremap arch_ioremap
> +
> +bool arch_iounmap(void __iomem *addr);
> +#define arch_iounmap arch_iounmap
> +
> +void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
> +#define ioremap ioremap
> +
>   static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
>   {
>   	return (void __iomem *)port;
> @@ -32,8 +43,6 @@ static inline void ioport_unmap(void __iomem *addr)
>   {
>   }
>   
> -extern void iounmap(const void __iomem *addr);
> -
>   /*
>    * io{read,write}{16,32}be() macros
>    */
> diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
> index 0ee75aca6e10..c2dcacd56aca 100644
> --- a/arch/arc/mm/ioremap.c
> +++ b/arch/arc/mm/ioremap.c
> @@ -25,13 +25,6 @@ static inline bool arc_uncached_addr_space(phys_addr_t paddr)
>   
>   void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
>   {
> -	phys_addr_t end;
> -
> -	/* Don't allow wraparound or zero size */
> -	end = paddr + size - 1;
> -	if (!size || (end < paddr))
> -		return NULL;
> -
>   	/*
>   	 * If the region is h/w uncached, MMU mapping can be elided as optim
>   	 * The cast to u32 is fine as this region can only be inside 4GB
> @@ -44,62 +37,25 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
>   }
>   EXPORT_SYMBOL(ioremap);
>   
> -/*
> - * ioremap with access flags
> - * Cache semantics wise it is same as ioremap - "forced" uncached.
> - * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
> - * ARC hardware uncached region, this one still goes thru the MMU as caller
> - * might need finer access control (R/W/X)
> - */
> -void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
> -			   unsigned long flags)
> +void __iomem *
> +arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
>   {
> -	unsigned int off;
> -	unsigned long vaddr;
> -	struct vm_struct *area;
> -	phys_addr_t end;
> -	pgprot_t prot = __pgprot(flags);
> -
> -	/* Don't allow wraparound, zero size */
> -	end = paddr + size - 1;
> -	if ((!size) || (end < paddr))
> -		return NULL;
> -
>   	/* An early platform driver might end up here */
>   	if (!slab_is_available())
> -		return NULL;
> +		return IOMEM_ERR_PTR(-EINVAL);

I think the slab_is_available() check should be done in the generic
functions. On all architectures, SLAB must be available before you can
use get_vm_area_caller() and vunmap().

Christophe
Baoquan He Oct. 13, 2022, 9:51 a.m. UTC | #2
On 10/12/22 at 10:17am, Christophe Leroy wrote:
......
> > -/*
> > - * ioremap with access flags
> > - * Cache semantics wise it is same as ioremap - "forced" uncached.
> > - * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
> > - * ARC hardware uncached region, this one still goes thru the MMU as caller
> > - * might need finer access control (R/W/X)
> > - */
> > -void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
> > -			   unsigned long flags)
> > +void __iomem *
> > +arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
> >   {
> > -	unsigned int off;
> > -	unsigned long vaddr;
> > -	struct vm_struct *area;
> > -	phys_addr_t end;
> > -	pgprot_t prot = __pgprot(flags);
> > -
> > -	/* Don't allow wraparound, zero size */
> > -	end = paddr + size - 1;
> > -	if ((!size) || (end < paddr))
> > -		return NULL;
> > -
> >   	/* An early platform driver might end up here */
> >   	if (!slab_is_available())
> > -		return NULL;
> > +		return IOMEM_ERR_PTR(-EINVAL);
> 
> I think the slab_is_available() check should be done in the generic
> functions. On all architectures, SLAB must be available before you can
> use get_vm_area_caller() and vunmap().

I tend to agree.

Without slab initialized, the get_vm_area_caller() call will definitely
fail, and an arch's early ioremap code could end up calling into this
path.
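
For illustration, the suggestion amounts to a hunk like the following
against the generic ioremap_prot() sketched earlier (a sketch only,
with context lines assumed from this series, not a posted patch):

--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 	struct vm_struct *area;
 	void __iomem *base;
 
+	/* An early platform driver might end up here before slab is up */
+	if (!slab_is_available())
+		return NULL;
+
 	/* Disallow wrap-around or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
 		return NULL;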

Patch

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 9e3653253ef2..a08d2abfaf61 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -26,6 +26,7 @@ config ARC
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IOREMAP
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 8f777d6441a5..41a317567920 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -20,9 +20,20 @@ 
 #define __iowmb()		do { } while (0)
 #endif
 
-extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
-				  unsigned long flags);
+/*
+ * I/O memory mapping functions.
+ */
+
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val);
+#define arch_ioremap arch_ioremap
+
+bool arch_iounmap(void __iomem *addr);
+#define arch_iounmap arch_iounmap
+
+void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+#define ioremap ioremap
+
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
 	return (void __iomem *)port;
@@ -32,8 +43,6 @@ static inline void ioport_unmap(void __iomem *addr)
 {
 }
 
-extern void iounmap(const void __iomem *addr);
-
 /*
  * io{read,write}{16,32}be() macros
  */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 0ee75aca6e10..c2dcacd56aca 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -25,13 +25,6 @@ static inline bool arc_uncached_addr_space(phys_addr_t paddr)
 
 void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
 {
-	phys_addr_t end;
-
-	/* Don't allow wraparound or zero size */
-	end = paddr + size - 1;
-	if (!size || (end < paddr))
-		return NULL;
-
 	/*
 	 * If the region is h/w uncached, MMU mapping can be elided as optim
 	 * The cast to u32 is fine as this region can only be inside 4GB
@@ -44,62 +37,25 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);
 
-/*
- * ioremap with access flags
- * Cache semantics wise it is same as ioremap - "forced" uncached.
- * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
- * ARC hardware uncached region, this one still goes thru the MMU as caller
- * might need finer access control (R/W/X)
- */
-void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
-			   unsigned long flags)
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
 {
-	unsigned int off;
-	unsigned long vaddr;
-	struct vm_struct *area;
-	phys_addr_t end;
-	pgprot_t prot = __pgprot(flags);
-
-	/* Don't allow wraparound, zero size */
-	end = paddr + size - 1;
-	if ((!size) || (end < paddr))
-		return NULL;
-
 	/* An early platform driver might end up here */
 	if (!slab_is_available())
-		return NULL;
+		return IOMEM_ERR_PTR(-EINVAL);
 
 	/* force uncached */
-	prot = pgprot_noncached(prot);
+	*prot_val = pgprot_val(pgprot_noncached(__pgprot(*prot_val)));
 
-	/* Mappings have to be page-aligned */
-	off = paddr & ~PAGE_MASK;
-	paddr &= PAGE_MASK_PHYS;
-	size = PAGE_ALIGN(end + 1) - paddr;
+	return NULL;
 
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-	area->phys_addr = paddr;
-	vaddr = (unsigned long)area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
-		vunmap((void __force *)vaddr);
-		return NULL;
-	}
-	return (void __iomem *)(off + (char __iomem *)vaddr);
 }
-EXPORT_SYMBOL(ioremap_prot);
-
 
-void iounmap(const void __iomem *addr)
+bool arch_iounmap(void __iomem *addr)
 {
 	/* weird double cast to handle phys_addr_t > 32 bits */
 	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
-		return;
+		return false;
 
-	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+	return true;
 }
-EXPORT_SYMBOL(iounmap);