@@ -165,7 +165,7 @@ ioremap()
If the granule contains non-WB memory, but we can cover the
region safely with kernel page table mappings, we can use
- ioremap_page_range() as most other architectures do.
+ ioremap_range() as most other architectures do.
Failing all of the above, we have to fall back to a UC mapping.
@@ -187,7 +187,7 @@ __alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
prot = __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE
| _PAGE_KWE | flags);
- return ioremap_page_range(address, address + size, phys_addr, prot);
+ return ioremap_range(address, address + size, phys_addr, prot);
}
/* irq.c */
@@ -85,7 +85,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
return NULL;
area->phys_addr = paddr;
vaddr = (unsigned long)area->addr;
- if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
+ if (ioremap_range(vaddr, vaddr + size, paddr, prot)) {
vunmap((void __force *)vaddr);
return NULL;
}
@@ -110,7 +110,7 @@ void __init add_static_vm_early(struct static_vm *svm)
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
- return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+ return ioremap_range(virt, virt + PAGE_SIZE, phys,
__pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
@@ -312,7 +312,7 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
err = remap_area_sections(addr, pfn, size, type);
} else
#endif
- err = ioremap_page_range(addr, addr + size, paddr,
+ err = ioremap_range(addr, addr + size, paddr,
__pgprot(type->prot_pte));
if (err) {
@@ -473,7 +473,7 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ return ioremap_range(vaddr, vaddr + resource_size(res), phys_addr,
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);
@@ -52,7 +52,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
addr = (unsigned long)area->addr;
area->phys_addr = phys_addr;
- err = ioremap_page_range(addr, addr + size, phys_addr, prot);
+ err = ioremap_range(addr, addr + size, phys_addr, prot);
if (err) {
vunmap((void *)addr);
return NULL;
@@ -30,7 +30,7 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
area = get_vm_area(size, VM_IOREMAP);
addr = (unsigned long)area->addr;
- if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
+ if (ioremap_range(addr, addr+size, phys_addr, prot)) {
vunmap((void *)addr);
return NULL;
}
@@ -86,7 +86,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
area->phys_addr = phys_addr;
addr = (void __iomem *) area->addr;
- if (ioremap_page_range((unsigned long) addr,
+ if (ioremap_range((unsigned long) addr,
(unsigned long) addr + size, phys_addr, prot)) {
vunmap((void __force *) addr);
return NULL;
@@ -162,7 +162,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
vaddr = PCI_IOBASE + range->io_start;
- ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+ ioremap_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
return 0;
}
@@ -101,7 +101,7 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
vaddr = (unsigned long)area->addr;
flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
- if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+ if (ioremap_range(vaddr, vaddr + size, phys_addr,
__pgprot(flags))) {
free_vm_area(area);
return NULL;
@@ -64,7 +64,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
fixmaps_used += (size >> PAGE_SHIFT);
}
- if (ioremap_page_range(v, v + size, p,
+ if (ioremap_range(v, v + size, p,
__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
if (likely(mem_init_done))
vfree(area->addr);
@@ -80,7 +80,7 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
return NULL;
addr = (void __iomem *) area->addr;
- if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+ if (ioremap_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
vunmap(addr);
return NULL;
@@ -46,7 +46,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
WARN_ON_ONCE(size & ~PAGE_MASK);
if (slab_is_available()) {
- if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+ if (ioremap_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
pgprot_noncached(PAGE_KERNEL)))
vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
} else {
@@ -138,7 +138,7 @@ void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
return NULL;
addr = (unsigned long)area->addr;
- if (ioremap_page_range(addr, addr + size, paddr,
+ if (ioremap_range(addr, addr + size, paddr,
pgprot_noncached(PAGE_KERNEL))) {
vunmap_range(addr, addr + size);
return NULL;
@@ -89,7 +89,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
area->phys_addr = pa;
va = (unsigned long)area->addr;
- ret = ioremap_page_range(va, va + size, pa, prot);
+ ret = ioremap_range(va, va + size, pa, prot);
if (!ret)
return (void __iomem *)area->addr + offset;
@@ -252,7 +252,7 @@ static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
return NULL;
vaddr = (unsigned long) area->addr;
- if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
+ if (ioremap_range(vaddr, vaddr + size, addr, prot)) {
free_vm_area(area);
return NULL;
}
@@ -110,7 +110,7 @@ static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
vma->phys_addr = map->addr;
- if (ioremap_page_range((unsigned long)vma->addr,
+ if (ioremap_range((unsigned long)vma->addr,
(unsigned long)vma->addr + map->size,
vma->phys_addr, prot)) {
vunmap(vma->addr);
@@ -132,7 +132,7 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
area->phys_addr = phys_addr;
orig_addr = addr = (unsigned long)area->addr;
- if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+ if (ioremap_range(addr, addr + size, phys_addr, pgprot)) {
vunmap((void *)orig_addr);
return NULL;
}
@@ -286,7 +286,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
if (memtype_kernel_map_sync(phys_addr, size, pcm))
goto err_free_area;
- if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
+ if (ioremap_range(vaddr, vaddr + size, phys_addr, prot))
goto err_free_area;
ret_addr = (void __iomem *) (vaddr + offset);
@@ -33,7 +33,7 @@ static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
vaddr = (unsigned long)area->addr;
area->phys_addr = paddr;
- err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+ err = ioremap_range(vaddr, vaddr + size, paddr, prot);
if (err) {
vunmap((void *)vaddr);
@@ -4234,7 +4234,7 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ return ioremap_range(vaddr, vaddr + resource_size(res), phys_addr,
pgprot_device(PAGE_KERNEL));
#else
/*
@@ -21,10 +21,10 @@ void __ioread32_copy(void *to, const void __iomem *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#ifdef CONFIG_MMU
-int ioremap_page_range(unsigned long addr, unsigned long end,
+int ioremap_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot);
#else
-static inline int ioremap_page_range(unsigned long addr, unsigned long end,
+static inline int ioremap_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
return 0;
@@ -33,7 +33,7 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
return NULL;
vaddr = (unsigned long)area->addr;
- if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+ if (ioremap_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
free_vm_area(area);
return NULL;
}
@@ -312,7 +312,7 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
return err;
}
-int ioremap_page_range(unsigned long addr, unsigned long end,
+int ioremap_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
int err;
The current ioremap_page_range() only maps an IO address range to kernel virtual addresses; no struct page pointer is passed in and no page handling is involved. So rename it to ioremap_range(). The renaming was done with the command below: sed -i "s/ioremap_page_range/ioremap_range/g" `git grep -l ioremap_page_range` Signed-off-by: Baoquan He <bhe@redhat.com> --- Documentation/ia64/aliasing.rst | 2 +- arch/alpha/kernel/proto.h | 2 +- arch/arc/mm/ioremap.c | 2 +- arch/arm/mm/ioremap.c | 6 +++--- arch/arm64/mm/ioremap.c | 2 +- arch/hexagon/mm/ioremap.c | 2 +- arch/ia64/mm/ioremap.c | 2 +- arch/mips/loongson64/init.c | 2 +- arch/mips/mm/ioremap.c | 2 +- arch/openrisc/mm/ioremap.c | 2 +- arch/parisc/mm/ioremap.c | 2 +- arch/powerpc/kernel/isa-bridge.c | 2 +- arch/powerpc/kernel/pci_64.c | 2 +- arch/powerpc/mm/ioremap.c | 2 +- arch/s390/pci/pci.c | 2 +- arch/sh/kernel/cpu/sh4/sq.c | 2 +- arch/sh/mm/ioremap.c | 2 +- arch/x86/mm/ioremap.c | 2 +- arch/xtensa/mm/ioremap.c | 2 +- drivers/pci/pci.c | 2 +- include/linux/io.h | 4 ++-- mm/ioremap.c | 2 +- mm/vmalloc.c | 2 +- 23 files changed, 26 insertions(+), 26 deletions(-)