Message ID | 1360024314-1895-4-git-send-email-iamjoonsoo.kim@lge.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Tue, 5 Feb 2013, Joonsoo Kim wrote: > A static mapped area is ARM-specific, so it is better not to use > generic vmalloc data structure, that is, vmlist and vmlist_lock > for managing static mapped area. And it causes some needless overhead and > reducing this overhead is better idea. > > Now, we have newly introduced static_vm infrastructure. > With it, we don't need to iterate all mapped areas. Instead, we just > iterate static mapped areas. It helps to reduce an overhead of finding > matched area. And architecture dependency on vmalloc layer is removed, > so it will help to maintainability for vmalloc layer. > > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Some comments below. > diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c > index 904c15e..c7fef4b 100644 > --- a/arch/arm/mm/ioremap.c > +++ b/arch/arm/mm/ioremap.c > @@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, > const struct mem_type *type; > int err; > unsigned long addr; > - struct vm_struct * area; > + struct vm_struct *area; > + phys_addr_t paddr = __pfn_to_phys(pfn); > > #ifndef CONFIG_ARM_LPAE > /* > * High mappings must be supersection aligned > */ > - if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) > + if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK)) > return NULL; > #endif > > @@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, > /* > * Try to reuse one of the static mapping whenever possible. 
> */ > - read_lock(&vmlist_lock); > - for (area = vmlist; area; area = area->next) { > - if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) > - break; > - if (!(area->flags & VM_ARM_STATIC_MAPPING)) > - continue; > - if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) > - continue; > - if (__phys_to_pfn(area->phys_addr) > pfn || > - __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) > - continue; > - /* we can drop the lock here as we know *area is static */ > - read_unlock(&vmlist_lock); > - addr = (unsigned long)area->addr; > - addr += __pfn_to_phys(pfn) - area->phys_addr; > - return (void __iomem *) (offset + addr); > + if (size && !((sizeof(phys_addr_t) == 4 && pfn >= 0x100000))) { ^ ^ You have a needless extra set of parents here. [...] > diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c > index ce328c7..b2c0356 100644 > --- a/arch/arm/mm/mmu.c > +++ b/arch/arm/mm/mmu.c > @@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr) > { > struct map_desc *md; > struct vm_struct *vm; > + struct static_vm *svm; > > if (!nr) > return; > > - vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); > + svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); > > for (md = io_desc; nr; md++, nr--) { > create_mapping(md); > + > + vm = &svm->vm; > vm->addr = (void *)(md->virtual & PAGE_MASK); > vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); > vm->phys_addr = __pfn_to_phys(md->pfn); > vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; > vm->flags |= VM_ARM_MTYPE(md->type); > vm->caller = iotable_init; > - vm_area_add_early(vm++); > + add_static_vm_early(svm++); > } > } > > @@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, > void *caller) > { > struct vm_struct *vm; > + struct static_vm *svm; > + > + svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); > > - vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); > + vm = &svm->vm; > 
vm->addr = (void *)addr; > vm->size = size; > vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; > vm->caller = caller; > - vm_area_add_early(vm); > + add_static_vm_early(svm); > } > > #ifndef CONFIG_ARM_LPAE > @@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr) > > static void __init fill_pmd_gaps(void) > { > + struct static_vm *svm; > struct vm_struct *vm; > unsigned long addr, next = 0; > pmd_t *pmd; > > - /* we're still single threaded hence no lock needed here */ > - for (vm = vmlist; vm; vm = vm->next) { > - if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) > - continue; > + list_for_each_entry(svm, &static_vmlist, list) { > + vm = &svm->vm; > addr = (unsigned long)vm->addr; > if (addr < next) > continue; > @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) > { > struct vm_struct *vm; > unsigned long addr; > + struct static_vm *svm; > > - /* we're still single threaded hence no lock needed here */ > - for (vm = vmlist; vm; vm = vm->next) { > - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) > - continue; > - addr = (unsigned long)vm->addr; > - addr &= ~(SZ_2M - 1); > - if (addr == PCI_IO_VIRT_BASE) > - return; > + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); > + if (svm) > + return; > > - } > > vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); > } The replacement code is not equivalent. I can't recall why the original is as it is, but it doesn't look right to me. The 2MB round down certainly looks suspicious. The replacement code should be better. However I'd like you to get an ACK from Rob Herring as well for this patch. Once that is sorted out, you can add Reviewed-by: Nicolas Pitre <nico@linaro.org> Nicolas
On 02/04/2013 10:44 PM, Nicolas Pitre wrote: > On Tue, 5 Feb 2013, Joonsoo Kim wrote: > >> A static mapped area is ARM-specific, so it is better not to use >> generic vmalloc data structure, that is, vmlist and vmlist_lock >> for managing static mapped area. And it causes some needless overhead and >> reducing this overhead is better idea. >> >> Now, we have newly introduced static_vm infrastructure. >> With it, we don't need to iterate all mapped areas. Instead, we just >> iterate static mapped areas. It helps to reduce an overhead of finding >> matched area. And architecture dependency on vmalloc layer is removed, >> so it will help to maintainability for vmalloc layer. >> >> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> [snip] >> @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) >> { >> struct vm_struct *vm; >> unsigned long addr; >> + struct static_vm *svm; >> >> - /* we're still single threaded hence no lock needed here */ >> - for (vm = vmlist; vm; vm = vm->next) { >> - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) >> - continue; >> - addr = (unsigned long)vm->addr; >> - addr &= ~(SZ_2M - 1); >> - if (addr == PCI_IO_VIRT_BASE) >> - return; >> + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); >> + if (svm) >> + return; >> >> - } >> >> vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); >> } > > The replacement code is not equivalent. I can't recall why the original > is as it is, but it doesn't look right to me. The 2MB round down > certainly looks suspicious. The PCI mapping is at a fixed, aligned 2MB mapping. If we find any virtual address within that region already mapped, it is an error. We probably should have had a WARN here. > > The replacement code should be better. However I'd like you to get an > ACK from Rob Herring as well for this patch. It doesn't appear to me the above case is handled. 
The virt addr is checked whether it is within an existing mapping, but not whether the new mapping would overlap an existing mapping. It would be good to check for this generically rather than specifically for the PCI i/o mapping. Rob
On Tue, 5 Feb 2013, Rob Herring wrote: > On 02/04/2013 10:44 PM, Nicolas Pitre wrote: > > On Tue, 5 Feb 2013, Joonsoo Kim wrote: > > > >> A static mapped area is ARM-specific, so it is better not to use > >> generic vmalloc data structure, that is, vmlist and vmlist_lock > >> for managing static mapped area. And it causes some needless overhead and > >> reducing this overhead is better idea. > >> > >> Now, we have newly introduced static_vm infrastructure. > >> With it, we don't need to iterate all mapped areas. Instead, we just > >> iterate static mapped areas. It helps to reduce an overhead of finding > >> matched area. And architecture dependency on vmalloc layer is removed, > >> so it will help to maintainability for vmalloc layer. > >> > >> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> > > [snip] > > >> @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) > >> { > >> struct vm_struct *vm; > >> unsigned long addr; > >> + struct static_vm *svm; > >> > >> - /* we're still single threaded hence no lock needed here */ > >> - for (vm = vmlist; vm; vm = vm->next) { > >> - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) > >> - continue; > >> - addr = (unsigned long)vm->addr; > >> - addr &= ~(SZ_2M - 1); > >> - if (addr == PCI_IO_VIRT_BASE) > >> - return; > >> + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); > >> + if (svm) > >> + return; > >> > >> - } > >> > >> vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); > >> } > > > > The replacement code is not equivalent. I can't recall why the original > > is as it is, but it doesn't look right to me. The 2MB round down > > certainly looks suspicious. > > The PCI mapping is at a fixed, aligned 2MB mapping. If we find any > virtual address within that region already mapped, it is an error. Ah, OK. This wasn't clear looking at the code. > We probably should have had a WARN here. Indeed. > > > > The replacement code should be better. 
However I'd like you to get an > > ACK from Rob Herring as well for this patch. > > It doesn't appear to me the above case is handled. The virt addr is > checked whether it is within an existing mapping, but not whether the > new mapping would overlap an existing mapping. It would be good to check > for this generically rather than specifically for the PCI i/o mapping. Agreed. However that is checked already in vm_area_add_early(). Therefore the overlap test here is redundant. Nicolas
On 02/05/2013 12:13 PM, Nicolas Pitre wrote: > On Tue, 5 Feb 2013, Rob Herring wrote: > >> On 02/04/2013 10:44 PM, Nicolas Pitre wrote: >>> On Tue, 5 Feb 2013, Joonsoo Kim wrote: >>> >>>> A static mapped area is ARM-specific, so it is better not to use >>>> generic vmalloc data structure, that is, vmlist and vmlist_lock >>>> for managing static mapped area. And it causes some needless overhead and >>>> reducing this overhead is better idea. >>>> >>>> Now, we have newly introduced static_vm infrastructure. >>>> With it, we don't need to iterate all mapped areas. Instead, we just >>>> iterate static mapped areas. It helps to reduce an overhead of finding >>>> matched area. And architecture dependency on vmalloc layer is removed, >>>> so it will help to maintainability for vmalloc layer. >>>> >>>> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> >> >> [snip] >> >>>> @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) >>>> { >>>> struct vm_struct *vm; >>>> unsigned long addr; >>>> + struct static_vm *svm; >>>> >>>> - /* we're still single threaded hence no lock needed here */ >>>> - for (vm = vmlist; vm; vm = vm->next) { >>>> - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) >>>> - continue; >>>> - addr = (unsigned long)vm->addr; >>>> - addr &= ~(SZ_2M - 1); >>>> - if (addr == PCI_IO_VIRT_BASE) >>>> - return; >>>> + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); >>>> + if (svm) >>>> + return; >>>> >>>> - } >>>> >>>> vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); >>>> } >>> >>> The replacement code is not equivalent. I can't recall why the original >>> is as it is, but it doesn't look right to me. The 2MB round down >>> certainly looks suspicious. >> >> The PCI mapping is at a fixed, aligned 2MB mapping. If we find any >> virtual address within that region already mapped, it is an error. > > Ah, OK. This wasn't clear looking at the code. > >> We probably should have had a WARN here. > > Indeed. 
> >>> >>> The replacement code should be better. However I'd like you to get an >>> ACK from Rob Herring as well for this patch. >> >> It doesn't appear to me the above case is handled. The virt addr is >> checked whether it is within an existing mapping, but not whether the >> new mapping would overlap an existing mapping. It would be good to check >> for this generically rather than specifically for the PCI i/o mapping. > > Agreed. However that is checked already in vm_area_add_early(). > Therefore the overlap test here is redundant. Ah, right. In that case: Acked-by: Rob Herring <rob.herring@calxeda.com> Rob
Hello, Nicolas. On Mon, Feb 04, 2013 at 11:44:16PM -0500, Nicolas Pitre wrote: > On Tue, 5 Feb 2013, Joonsoo Kim wrote: > > > A static mapped area is ARM-specific, so it is better not to use > > generic vmalloc data structure, that is, vmlist and vmlist_lock > > for managing static mapped area. And it causes some needless overhead and > > reducing this overhead is better idea. > > > > Now, we have newly introduced static_vm infrastructure. > > With it, we don't need to iterate all mapped areas. Instead, we just > > iterate static mapped areas. It helps to reduce an overhead of finding > > matched area. And architecture dependency on vmalloc layer is removed, > > so it will help to maintainability for vmalloc layer. > > > > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> > > Some comments below. > > > diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c > > index 904c15e..c7fef4b 100644 > > --- a/arch/arm/mm/ioremap.c > > +++ b/arch/arm/mm/ioremap.c > > @@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, > > const struct mem_type *type; > > int err; > > unsigned long addr; > > - struct vm_struct * area; > > + struct vm_struct *area; > > + phys_addr_t paddr = __pfn_to_phys(pfn); > > > > #ifndef CONFIG_ARM_LPAE > > /* > > * High mappings must be supersection aligned > > */ > > - if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) > > + if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK)) > > return NULL; > > #endif > > > > @@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, > > /* > > * Try to reuse one of the static mapping whenever possible. 
> > */ > > - read_lock(&vmlist_lock); > > - for (area = vmlist; area; area = area->next) { > > - if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) > > - break; > > - if (!(area->flags & VM_ARM_STATIC_MAPPING)) > > - continue; > > - if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) > > - continue; > > - if (__phys_to_pfn(area->phys_addr) > pfn || > > - __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) > > - continue; > > - /* we can drop the lock here as we know *area is static */ > > - read_unlock(&vmlist_lock); > > - addr = (unsigned long)area->addr; > > - addr += __pfn_to_phys(pfn) - area->phys_addr; > > - return (void __iomem *) (offset + addr); > > + if (size && !((sizeof(phys_addr_t) == 4 && pfn >= 0x100000))) { > ^ ^ > You have a needless extra set of parents here. Okay. > [...] > > > diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c > > index ce328c7..b2c0356 100644 > > --- a/arch/arm/mm/mmu.c > > +++ b/arch/arm/mm/mmu.c > > @@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr) > > { > > struct map_desc *md; > > struct vm_struct *vm; > > + struct static_vm *svm; > > > > if (!nr) > > return; > > > > - vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); > > + svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); > > > > for (md = io_desc; nr; md++, nr--) { > > create_mapping(md); > > + > > + vm = &svm->vm; > > vm->addr = (void *)(md->virtual & PAGE_MASK); > > vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); > > vm->phys_addr = __pfn_to_phys(md->pfn); > > vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; > > vm->flags |= VM_ARM_MTYPE(md->type); > > vm->caller = iotable_init; > > - vm_area_add_early(vm++); > > + add_static_vm_early(svm++); > > } > > } > > > > @@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, > > void *caller) > > { > > struct vm_struct *vm; > > + struct static_vm *svm; > > + > > + svm = 
early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); > > > > - vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); > > + vm = &svm->vm; > > vm->addr = (void *)addr; > > vm->size = size; > > vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; > > vm->caller = caller; > > - vm_area_add_early(vm); > > + add_static_vm_early(svm); > > } > > > > #ifndef CONFIG_ARM_LPAE > > @@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr) > > > > static void __init fill_pmd_gaps(void) > > { > > + struct static_vm *svm; > > struct vm_struct *vm; > > unsigned long addr, next = 0; > > pmd_t *pmd; > > > > - /* we're still single threaded hence no lock needed here */ > > - for (vm = vmlist; vm; vm = vm->next) { > > - if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) > > - continue; > > + list_for_each_entry(svm, &static_vmlist, list) { > > + vm = &svm->vm; > > addr = (unsigned long)vm->addr; > > if (addr < next) > > continue; > > @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) > > { > > struct vm_struct *vm; > > unsigned long addr; > > + struct static_vm *svm; > > > > - /* we're still single threaded hence no lock needed here */ > > - for (vm = vmlist; vm; vm = vm->next) { > > - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) > > - continue; > > - addr = (unsigned long)vm->addr; > > - addr &= ~(SZ_2M - 1); > > - if (addr == PCI_IO_VIRT_BASE) > > - return; > > + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); > > + if (svm) > > + return; > > > > - } > > > > vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); > > } > > The replacement code is not equivalent. I can't recall why the original > is as it is, but it doesn't look right to me. The 2MB round down > certainly looks suspicious. > > The replacement code should be better. However I'd like you to get an > ACK from Rob Herring as well for this patch. > > Once that is sorted out, you can add > > Reviewed-by: Nicolas Pitre <nico@linaro.org> Okay. 
I will fix this and re-send it with your "Reviewed-by". Thanks. > > Nicolas > -- > To unsubscribe from this list: send the line "unsubscribe linux-kernel" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html > Please read the FAQ at http://www.tux.org/lkml/
Hello, Rob. On Tue, Feb 05, 2013 at 01:12:51PM -0600, Rob Herring wrote: > On 02/05/2013 12:13 PM, Nicolas Pitre wrote: > > On Tue, 5 Feb 2013, Rob Herring wrote: > > > >> On 02/04/2013 10:44 PM, Nicolas Pitre wrote: > >>> On Tue, 5 Feb 2013, Joonsoo Kim wrote: > >>> > >>>> A static mapped area is ARM-specific, so it is better not to use > >>>> generic vmalloc data structure, that is, vmlist and vmlist_lock > >>>> for managing static mapped area. And it causes some needless overhead and > >>>> reducing this overhead is better idea. > >>>> > >>>> Now, we have newly introduced static_vm infrastructure. > >>>> With it, we don't need to iterate all mapped areas. Instead, we just > >>>> iterate static mapped areas. It helps to reduce an overhead of finding > >>>> matched area. And architecture dependency on vmalloc layer is removed, > >>>> so it will help to maintainability for vmalloc layer. > >>>> > >>>> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> > >> > >> [snip] > >> > >>>> @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) > >>>> { > >>>> struct vm_struct *vm; > >>>> unsigned long addr; > >>>> + struct static_vm *svm; > >>>> > >>>> - /* we're still single threaded hence no lock needed here */ > >>>> - for (vm = vmlist; vm; vm = vm->next) { > >>>> - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) > >>>> - continue; > >>>> - addr = (unsigned long)vm->addr; > >>>> - addr &= ~(SZ_2M - 1); > >>>> - if (addr == PCI_IO_VIRT_BASE) > >>>> - return; > >>>> + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); > >>>> + if (svm) > >>>> + return; > >>>> > >>>> - } > >>>> > >>>> vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); > >>>> } > >>> > >>> The replacement code is not equivalent. I can't recall why the original > >>> is as it is, but it doesn't look right to me. The 2MB round down > >>> certainly looks suspicious. > >> > >> The PCI mapping is at a fixed, aligned 2MB mapping. 
If we find any > >> virtual address within that region already mapped, it is an error. > > Ah, OK. This wasn't clear looking at the code. > >> We probably should have had a WARN here. > > > > Indeed. > > Okay. I should fix it to find any mapping within PCI reserved region. But, I think that it is not an error. Now, I see your original commit 'c2794437091a4fda72c4a4f3567dd728dcc0c3c9' and find below message. "Platforms which need early i/o mapping (e.g. for vga console) can call pci_map_io_early in their .map_io function." Therefore, for some platform, it is possible that there is a mapping within PCI reserved range. So, I will not add WARN here. I will fix and re-send v6 with your ACK. Thanks for review. > >>> > > >>> The replacement code should be better. However I'd like you to get an > > >>> ACK from Rob Herring as well for this patch. > >> > >> It doesn't appear to me the above case is handled. The virt addr is > >> checked whether it is within an existing mapping, but not whether the > >> new mapping would overlap an existing mapping. It would be good to check > >> for this generically rather than specifically for the PCI i/o mapping. > > > > Agreed. However that is checked already in vm_area_add_early(). > > Therefore the overlap test here is redundant. > > Ah, right. In that case: > > Acked-by: Rob Herring <rob.herring@calxeda.com> > > Rob > > -- > To unsubscribe from this list: send the line "unsubscribe linux-kernel" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html > Please read the FAQ at http://www.tux.org/lkml/
On Wed, Feb 06, 2013 at 11:07:07AM +0900, Joonsoo Kim wrote: > Hello, Rob. > > On Tue, Feb 05, 2013 at 01:12:51PM -0600, Rob Herring wrote: > > On 02/05/2013 12:13 PM, Nicolas Pitre wrote: > > > On Tue, 5 Feb 2013, Rob Herring wrote: > > > > > >> On 02/04/2013 10:44 PM, Nicolas Pitre wrote: > > >>> On Tue, 5 Feb 2013, Joonsoo Kim wrote: > > >>> > > >>>> A static mapped area is ARM-specific, so it is better not to use > > >>>> generic vmalloc data structure, that is, vmlist and vmlist_lock > > >>>> for managing static mapped area. And it causes some needless overhead and > > >>>> reducing this overhead is better idea. > > >>>> > > >>>> Now, we have newly introduced static_vm infrastructure. > > >>>> With it, we don't need to iterate all mapped areas. Instead, we just > > >>>> iterate static mapped areas. It helps to reduce an overhead of finding > > >>>> matched area. And architecture dependency on vmalloc layer is removed, > > >>>> so it will help to maintainability for vmalloc layer. > > >>>> > > >>>> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> > > >> > > >> [snip] > > >> > > >>>> @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) > > >>>> { > > >>>> struct vm_struct *vm; > > >>>> unsigned long addr; > > >>>> + struct static_vm *svm; > > >>>> > > >>>> - /* we're still single threaded hence no lock needed here */ > > >>>> - for (vm = vmlist; vm; vm = vm->next) { > > >>>> - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) > > >>>> - continue; > > >>>> - addr = (unsigned long)vm->addr; > > >>>> - addr &= ~(SZ_2M - 1); > > >>>> - if (addr == PCI_IO_VIRT_BASE) > > >>>> - return; > > >>>> + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); > > >>>> + if (svm) > > >>>> + return; > > >>>> > > >>>> - } > > >>>> > > >>>> vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); > > >>>> } > > >>> > > >>> The replacement code is not equivalent. I can't recall why the original > > >>> is as it is, but it doesn't look right to me. 
The 2MB round down > > >>> certainly looks suspicious. > > >> > > >> The PCI mapping is at a fixed, aligned 2MB mapping. If we find any > > >> virtual address within that region already mapped, it is an error. > > > Ah, OK. This wasn't clear looking at the code. > > >> We probably should have had a WARN here. > > > > > > Indeed. > > > > > Okay. > I should fix it to find any mapping within PCI reserved region. Ah... Above comment is my mistake. If there is a region already mapped within PCI reserved region and it is not found by find_static_vm_vaddr(), vm_area_add_early() hit BUG_ON(). So, to leave find_static_vm_vaddr() is safe. > But, I think that it is not an error. > Now, I see your original commit 'c2794437091a4fda72c4a4f3567dd728dcc0c3c9' > and find below message. > > "Platforms which need early i/o mapping (e.g. for vga console) can call > pci_map_io_early in their .map_io function." > > Therfore, for some platform, it is possible that there is a mapping within > PCI reserved range. > > So, I will not add WARN here. > > I will fix and re-send v6 with your ACK. > > Thanks for review. > > > >>> > > >>> The replacement code should be better. However I'd like you to get an > > >>> ACK from Rob Herring as well for this patch. > > >> > > >> It doesn't appear to me the above case is handled. The virt addr is > > >> checked whether it is within an existing mapping, but not whether the > > >> new mapping would overlap an existing mapping. It would be good to check > > >> for this generically rather than specifically for the PCI i/o mapping. > > > > > > Agreed. However that is checked already in vm_area_add_early(). > > > Therefore the overlap test here is redundant. > > > > Ah, right. 
In that case: > > > > Acked-by: Rob Herring <rob.herring@calxeda.com> > > > > Rob > > > > -- > > To unsubscribe from this list: send the line "unsubscribe linux-kernel" in > > the body of a message to majordomo@vger.kernel.org > > More majordomo info at http://vger.kernel.org/majordomo-info.html > > Please read the FAQ at http://www.tux.org/lkml/ > -- > To unsubscribe from this list: send the line "unsubscribe linux-kernel" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html > Please read the FAQ at http://www.tux.org/lkml/
On Wed, 6 Feb 2013, Joonsoo Kim wrote: > On Wed, Feb 06, 2013 at 11:07:07AM +0900, Joonsoo Kim wrote: > > Hello, Rob. > > > > On Tue, Feb 05, 2013 at 01:12:51PM -0600, Rob Herring wrote: > > > On 02/05/2013 12:13 PM, Nicolas Pitre wrote: > > > > On Tue, 5 Feb 2013, Rob Herring wrote: > > > > > > > >> On 02/04/2013 10:44 PM, Nicolas Pitre wrote: > > > >>> On Tue, 5 Feb 2013, Joonsoo Kim wrote: > > > >>> > > > >>>> A static mapped area is ARM-specific, so it is better not to use > > > >>>> generic vmalloc data structure, that is, vmlist and vmlist_lock > > > >>>> for managing static mapped area. And it causes some needless overhead and > > > >>>> reducing this overhead is better idea. > > > >>>> > > > >>>> Now, we have newly introduced static_vm infrastructure. > > > >>>> With it, we don't need to iterate all mapped areas. Instead, we just > > > >>>> iterate static mapped areas. It helps to reduce an overhead of finding > > > >>>> matched area. And architecture dependency on vmalloc layer is removed, > > > >>>> so it will help to maintainability for vmalloc layer. 
> > > >>>> > > > >>>> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> > > > >> > > > >> [snip] > > > >> > > > >>>> @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) > > > >>>> { > > > >>>> struct vm_struct *vm; > > > >>>> unsigned long addr; > > > >>>> + struct static_vm *svm; > > > >>>> > > > >>>> - /* we're still single threaded hence no lock needed here */ > > > >>>> - for (vm = vmlist; vm; vm = vm->next) { > > > >>>> - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) > > > >>>> - continue; > > > >>>> - addr = (unsigned long)vm->addr; > > > >>>> - addr &= ~(SZ_2M - 1); > > > >>>> - if (addr == PCI_IO_VIRT_BASE) > > > >>>> - return; > > > >>>> + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); > > > >>>> + if (svm) > > > >>>> + return; > > > >>>> > > > >>>> - } > > > >>>> > > > >>>> vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); > > > >>>> } > > > >>> > > > >>> The replacement code is not equivalent. I can't recall why the original > > > >>> is as it is, but it doesn't look right to me. The 2MB round down > > > >>> certainly looks suspicious. > > > >> > > > >> The PCI mapping is at a fixed, aligned 2MB mapping. If we find any > > > >> virtual address within that region already mapped, it is an error. > > > > Ah, OK. This wasn't clear looking at the code. > > > >> We probably should have had a WARN here. > > > > > > > > Indeed. > > > > > > > > Okay. > > I should fix it to find any mapping within PCI reserved region. > > Ah... > Above comment is my mistake. > > If there is a region already mapped within PCI reserved region and > it is not found by find_static_vm_vaddr(), vm_area_add_early() hit BUG_ON(). > So, to leave find_static_vm_vaddr() is safe. Yes. In conclusion, your patch was fine. You may remove the redundant parents and send the whole set to Russell. Nicolas
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 904c15e..c7fef4b 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, const struct mem_type *type; int err; unsigned long addr; - struct vm_struct * area; + struct vm_struct *area; + phys_addr_t paddr = __pfn_to_phys(pfn); #ifndef CONFIG_ARM_LPAE /* * High mappings must be supersection aligned */ - if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) + if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK)) return NULL; #endif @@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, /* * Try to reuse one of the static mapping whenever possible. */ - read_lock(&vmlist_lock); - for (area = vmlist; area; area = area->next) { - if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) - break; - if (!(area->flags & VM_ARM_STATIC_MAPPING)) - continue; - if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) - continue; - if (__phys_to_pfn(area->phys_addr) > pfn || - __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) - continue; - /* we can drop the lock here as we know *area is static */ - read_unlock(&vmlist_lock); - addr = (unsigned long)area->addr; - addr += __pfn_to_phys(pfn) - area->phys_addr; - return (void __iomem *) (offset + addr); + if (size && !((sizeof(phys_addr_t) == 4 && pfn >= 0x100000))) { + struct static_vm *svm; + + svm = find_static_vm_paddr(paddr, size, mtype); + if (svm) { + addr = (unsigned long)svm->vm.addr; + addr += paddr - svm->vm.phys_addr; + return (void __iomem *) (offset + addr); + } } - read_unlock(&vmlist_lock); /* * Don't allow RAM to be mapped - this causes problems with ARMv6+ @@ -312,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, if (!area) return NULL; addr = (unsigned long)area->addr; - area->phys_addr = __pfn_to_phys(pfn); + area->phys_addr = paddr; #if !defined(CONFIG_SMP) && 
!defined(CONFIG_ARM_LPAE) if (DOMAIN_IO == 0 && (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || cpu_is_xsc3()) && pfn >= 0x100000 && - !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { + !((paddr | size | addr) & ~SUPERSECTION_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_supersections(addr, pfn, size, type); - } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { + } else if (!((paddr | size | addr) & ~PMD_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_sections(addr, pfn, size, type); } else #endif - err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), + err = ioremap_page_range(addr, addr + size, paddr, __pgprot(type->prot_pte)); if (err) { @@ -410,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) void __iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); - struct vm_struct *vm; + struct static_vm *svm; + + /* If this is a static mapping, we must leave it alone */ + svm = find_static_vm_vaddr(addr); + if (svm) + return; - read_lock(&vmlist_lock); - for (vm = vmlist; vm; vm = vm->next) { - if (vm->addr > addr) - break; - if (!(vm->flags & VM_IOREMAP)) - continue; - /* If this is a static mapping we must leave it alone */ - if ((vm->flags & VM_ARM_STATIC_MAPPING) && - (vm->addr <= addr) && (vm->addr + vm->size > addr)) { - read_unlock(&vmlist_lock); - return; - } #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) + { + struct vm_struct *vm; + + vm = find_vm_area(addr); + /* * If this is a section based mapping we need to handle it * specially as the VM subsystem does not know how to handle * such a beast. 
*/ - if ((vm->addr == addr) && - (vm->flags & VM_ARM_SECTION_MAPPING)) { + if (vm && (vm->flags & VM_ARM_SECTION_MAPPING)) unmap_area_sections((unsigned long)vm->addr, vm->size); - break; - } -#endif } - read_unlock(&vmlist_lock); +#endif vunmap(addr); } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ce328c7..b2c0356 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr) { struct map_desc *md; struct vm_struct *vm; + struct static_vm *svm; if (!nr) return; - vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); + svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); for (md = io_desc; nr; md++, nr--) { create_mapping(md); + + vm = &svm->vm; vm->addr = (void *)(md->virtual & PAGE_MASK); vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); vm->phys_addr = __pfn_to_phys(md->pfn); vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; vm->flags |= VM_ARM_MTYPE(md->type); vm->caller = iotable_init; - vm_area_add_early(vm++); + add_static_vm_early(svm++); } } @@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, void *caller) { struct vm_struct *vm; + struct static_vm *svm; + + svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); - vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); + vm = &svm->vm; vm->addr = (void *)addr; vm->size = size; vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; vm->caller = caller; - vm_area_add_early(vm); + add_static_vm_early(svm); } #ifndef CONFIG_ARM_LPAE @@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr) static void __init fill_pmd_gaps(void) { + struct static_vm *svm; struct vm_struct *vm; unsigned long addr, next = 0; pmd_t *pmd; - /* we're still single threaded hence no lock needed here */ - for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) - continue; + list_for_each_entry(svm, 
&static_vmlist, list) { + vm = &svm->vm; addr = (unsigned long)vm->addr; if (addr < next) continue; @@ -859,17 +864,12 @@ static void __init pci_reserve_io(void) { struct vm_struct *vm; unsigned long addr; + struct static_vm *svm; - /* we're still single threaded hence no lock needed here */ - for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) - continue; - addr = (unsigned long)vm->addr; - addr &= ~(SZ_2M - 1); - if (addr == PCI_IO_VIRT_BASE) - return; + svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); + if (svm) + return; - } vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); } #else
A statically mapped area is ARM-specific, so it is better not to use the generic vmalloc data structures, that is, vmlist and vmlist_lock, for managing statically mapped areas. Using them also causes some needless overhead, and reducing this overhead is worthwhile. Now we have the newly introduced static_vm infrastructure. With it, we don't need to iterate over all mapped areas; instead, we iterate only over the statically mapped areas. This reduces the overhead of finding a matching area. The architecture dependency on the vmalloc layer is also removed, which will help the maintainability of the vmalloc layer. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>