Message ID | 20230620131356.25440-13-bhe@redhat.com (mailing list archive) |
---|---|
State | New |
Series | mm: ioremap: Convert architectures to take GENERIC_IOREMAP way |
Hi Baoquan!

On Tue, 2023-06-20 at 21:13 +0800, Baoquan He wrote:
> By taking GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
> generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap()
> and iounmap() are all visible and available to arch. Arch needs to
> provide wrapper functions to override the generic versions if there's
> arch specific handling in its ioremap_prot(), ioremap() or iounmap().
> This change will simplify implementation by removing duplicated codes
                                                                  ^^^^^
Nit-pick: It should be "code", not "codes".

I'll review and test the rest tomorrow. There are quite some changes.

Adrian

> with generic_ioremap_prot() and generic_iounmap(), and has the equivalent
> functioality as before.
>
> Here, add wrapper functions ioremap_prot() and iounmap() for SuperH's
> special operation when ioremap() and iounmap().
>
> Signed-off-by: Baoquan He <bhe@redhat.com>
> Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
> Cc: Rich Felker <dalias@libc.org>
> Cc: linux-sh@vger.kernel.org
> ---
>  arch/sh/Kconfig          |  1 +
>  arch/sh/include/asm/io.h | 40 +++++--------------------
>  arch/sh/mm/ioremap.c     | 65 +++++++---------------------------------
>  3 files changed, 20 insertions(+), 86 deletions(-)
[...]
On 06/25/23 at 11:12pm, John Paul Adrian Glaubitz wrote:
> Hi Baoquan!
>
> On Tue, 2023-06-20 at 21:13 +0800, Baoquan He wrote:
> > By taking GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
> > generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap()
> > and iounmap() are all visible and available to arch. Arch needs to
> > provide wrapper functions to override the generic versions if there's
> > arch specific handling in its ioremap_prot(), ioremap() or iounmap().
> > This change will simplify implementation by removing duplicated codes
>                                                                   ^^^^^
> Nit-pick: It should be "code", not "codes".

Will change.

>
> I'll review and test the rest tomorrow. There are quite some changes.

That would be great, thanks a lot for your help.

>
> > with generic_ioremap_prot() and generic_iounmap(), and has the equivalent
> > functioality as before.
> >
> > Here, add wrapper functions ioremap_prot() and iounmap() for SuperH's
> > special operation when ioremap() and iounmap().
> >
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> > Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
> > Cc: Rich Felker <dalias@libc.org>
> > Cc: linux-sh@vger.kernel.org
> > ---
> >  arch/sh/Kconfig          |  1 +
> >  arch/sh/include/asm/io.h | 40 +++++--------------------
> >  arch/sh/mm/ioremap.c     | 65 +++++++---------------------------------
> >  3 files changed, 20 insertions(+), 86 deletions(-)
[...]
> --
> .''`.  John Paul Adrian Glaubitz
> : :' :  Debian Developer
> `. `'   Physicist
>   `-    GPG: 62FF 8A75 84E0 2956 9546 0006 7426 3B37 F5B5 F913
>
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 9652d367fc37..f326985e46e0 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -28,6 +28,7 @@ config SUPERH
 	select GENERIC_SMP_IDLE_THREAD
 	select GUP_GET_PXX_LOW_HIGH if X2TLB
 	select HAS_IOPORT if HAS_IOPORT_MAP
+	select GENERIC_IOREMAP if MMU
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 270e7952950c..b3a26b405c8d 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -266,40 +266,16 @@ unsigned long long poke_real_address_q(unsigned long long addr,
 #endif
 
 #ifdef CONFIG_MMU
-void iounmap(void __iomem *addr);
-void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
-			       pgprot_t prot, void *caller);
-
-static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
-{
-	return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
-				__builtin_return_address(0));
-}
-
-static inline void __iomem *
-ioremap_cache(phys_addr_t offset, unsigned long size)
-{
-	return __ioremap_caller(offset, size, PAGE_KERNEL,
-				__builtin_return_address(0));
-}
-#define ioremap_cache ioremap_cache
-
-#ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
-					 unsigned long flags)
-{
-	return __ioremap_caller(offset, size, __pgprot(flags),
-				__builtin_return_address(0));
-}
-#endif /* CONFIG_HAVE_IOREMAP_PROT */
+/*
+ * I/O memory mapping functions.
+ */
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
 
-#else /* CONFIG_MMU */
-static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
-{
-	return (void __iomem *)(unsigned long)offset;
-}
+#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_NOCACHE)
 
-static inline void iounmap(volatile void __iomem *addr) { }
+#define ioremap_cache(addr, size)	\
+	ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
 #endif /* CONFIG_MMU */
 
 #define ioremap_uc ioremap
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 21342581144d..c33b3daa4ad1 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -72,22 +72,11 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
 #define __ioremap_29bit(offset, size, prot)	NULL
 #endif /* CONFIG_29BIT */
 
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ref
-__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
-		 pgprot_t pgprot, void *caller)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   unsigned long prot)
 {
-	struct vm_struct *area;
-	unsigned long offset, last_addr, addr, orig_addr;
 	void __iomem *mapped;
+	pgprot_t pgprot = __pgprot(prot);
 
 	mapped = __ioremap_trapped(phys_addr, size);
 	if (mapped)
@@ -97,11 +86,6 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 	if (mapped)
 		return mapped;
 
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
-		return NULL;
-
 	/*
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
@@ -112,34 +96,14 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 	 * First try to remap through the PMB.
 	 * PMB entries are all pre-faulted.
 	 */
-	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
+	mapped = pmb_remap_caller(phys_addr, size, pgprot,
+				  __builtin_return_address(0));
 	if (mapped && !IS_ERR(mapped))
 		return mapped;
 
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-	area->phys_addr = phys_addr;
-	orig_addr = addr = (unsigned long)area->addr;
-
-	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
-		vunmap((void *)orig_addr);
-		return NULL;
-	}
-
-	return (void __iomem *)(offset + (char *)orig_addr);
+	return generic_ioremap_prot(phys_addr, size, pgprot);
 }
-EXPORT_SYMBOL(__ioremap_caller);
+EXPORT_SYMBOL(ioremap_prot);
 
 /*
  * Simple checks for non-translatable mappings.
@@ -158,10 +122,9 @@ static inline int iomapping_nontranslatable(unsigned long offset)
 	return 0;
 }
 
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
-	struct vm_struct *p;
 
 	/*
	 * Nothing to do if there is no translatable mapping.
@@ -172,21 +135,15 @@ void iounmap(void __iomem *addr)
 	/*
 	 * There's no VMA if it's from an early fixed mapping.
 	 */
-	if (iounmap_fixed(addr) == 0)
+	if (iounmap_fixed((void __iomem *)addr) == 0)
 		return;
 
 	/*
 	 * If the PMB handled it, there's nothing else to do.
 	 */
-	if (pmb_unmap(addr) == 0)
+	if (pmb_unmap((void __iomem *)addr) == 0)
 		return;
 
-	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
-	if (!p) {
-		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
-		return;
-	}
-
-	kfree(p);
+	generic_iounmap(addr);
 }
 EXPORT_SYMBOL(iounmap);
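For readers unfamiliar with the GENERIC_IOREMAP plumbing: the "#define ioremap_prot ioremap_prot" and "#define iounmap iounmap" lines in the io.h hunk above are how the architecture claims these names, so the generic header keeps its fallbacks out of the way. The snippet below is a rough, illustrative sketch of that fallback side as described in this series; it is not copied from include/asm-generic/io.h and the exact guards and signatures there may differ.

/*
 * Illustrative sketch only: the shape of the GENERIC_IOREMAP fallbacks
 * in the generic io.h.  An arch that #defines the name (as SuperH does
 * above) keeps its own implementation and these fallbacks drop out.
 */
void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
                                   pgprot_t prot);
void generic_iounmap(volatile void __iomem *addr);

#ifndef ioremap_prot
#define ioremap_prot ioremap_prot
static inline void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
                                          unsigned long prot)
{
        return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
#endif

#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
        /* _PAGE_IOREMAP is supplied by the arch; sh picks PAGE_KERNEL_NOCACHE */
        return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(volatile void __iomem *addr)
{
        generic_iounmap(addr);
}
#endif

With that in place, an architecture only needs to define the names it really overrides; everything it leaves alone resolves to the generic inline wrappers.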
By taking GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap()
and iounmap() are all visible and available to arch. Arch needs to
provide wrapper functions to override the generic versions if there's
arch specific handling in its ioremap_prot(), ioremap() or iounmap().
This change will simplify implementation by removing duplicated code
with generic_ioremap_prot() and generic_iounmap(), and has the equivalent
functionality as before.

Here, add wrapper functions ioremap_prot() and iounmap() for SuperH's
special operation when ioremap() and iounmap().

Signed-off-by: Baoquan He <bhe@redhat.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: linux-sh@vger.kernel.org
---
 arch/sh/Kconfig          |  1 +
 arch/sh/include/asm/io.h | 40 +++++--------------------
 arch/sh/mm/ioremap.c     | 65 +++++++---------------------------------
 3 files changed, 20 insertions(+), 86 deletions(-)
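The wrap-around check, page alignment and vm_area bookkeeping deleted from __ioremap_caller() are not lost: they are what generic_ioremap_prot() performs for every GENERIC_IOREMAP architecture, which is what the "equivalent functionality" claim rests on. Below is a rough, hedged sketch of that helper for reference; the real function lives in mm/ioremap.c in this series and may differ in detail (for instance in how it picks the vmalloc range).

/* Rough sketch of generic_ioremap_prot() (mm/ioremap.c); details may differ. */
void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
                                   pgprot_t prot)
{
        unsigned long offset, vaddr;
        phys_addr_t last_addr;
        struct vm_struct *area;

        /* Disallow wrap-around or zero size, as the sh code used to. */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /* Page-align the mapping, remembering the sub-page offset. */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr -= offset;
        size = PAGE_ALIGN(size + offset);

        /* Grab a VM_IOREMAP area and wire up the page tables. */
        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long)area->addr;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}

generic_iounmap() is the matching teardown: for addresses that were actually vmapped it releases the area behind the mapping, much like the remove_vm_area()/kfree() sequence removed from the sh iounmap() above.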