Message ID | 20230819031837.3160096-1-willy@infradead.org |
---|---|
State | New |
Series | mm: Remove checks for pte_index |
On Sat, Aug 19, 2023 at 04:18:37AM +0100, Matthew Wilcox (Oracle) wrote:
> Since pte_index is always defined, we don't need to check whether it's
> defined or not. Delete the slow version that doesn't depend on it and
> remove the #define since nobody needs to test for it.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Cc: Mike Rapoport <rppt@linux.ibm.com>
> Cc: Christian Dietrich <stettberger@dokucode.de>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

> ---
>  include/linux/pgtable.h |  1 -
>  mm/memory.c             | 17 +----------------
>  2 files changed, 1 insertion(+), 17 deletions(-)
>
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index cb5c1fad1078..1fba072b3dac 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -66,7 +66,6 @@ static inline unsigned long pte_index(unsigned long address)
>  {
>  	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
>  }
> -#define pte_index pte_index
>
>  #ifndef pmd_index
>  static inline unsigned long pmd_index(unsigned long address)
> diff --git a/mm/memory.c b/mm/memory.c
> index 2947fbc558f6..b7ce04cb058d 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1870,7 +1870,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
>  	return retval;
>  }
>
> -#ifdef pte_index
>  static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
>  			unsigned long addr, struct page *page, pgprot_t prot)
>  {
> @@ -1885,7 +1884,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
>  }
>
>  /* insert_pages() amortizes the cost of spinlock operations
> - * when inserting pages in a loop. Arch *must* define pte_index.
> + * when inserting pages in a loop.
>   */
>  static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
>  			struct page **pages, unsigned long *num, pgprot_t prot)
> @@ -1944,7 +1943,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
>  	*num = remaining_pages_total;
>  	return ret;
>  }
> -#endif /* ifdef pte_index */
>
>  /**
>   * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
> @@ -1964,7 +1962,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
>  int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
>  			struct page **pages, unsigned long *num)
>  {
> -#ifdef pte_index
>  	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
>
>  	if (addr < vma->vm_start || end_addr >= vma->vm_end)
> @@ -1976,18 +1973,6 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
>  	}
>  	/* Defer page refcount checking till we're about to map that page. */
>  	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
> -#else
> -	unsigned long idx = 0, pgcount = *num;
> -	int err = -EINVAL;
> -
> -	for (; idx < pgcount; ++idx) {
> -		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
> -		if (err)
> -			break;
> -	}
> -	*num = pgcount - idx;
> -	return err;
> -#endif /* ifdef pte_index */
>  }
>  EXPORT_SYMBOL(vm_insert_pages);
>
> --
> 2.40.1
>
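For context, vm_insert_pages() is the batched counterpart of vm_insert_page(): as the quoted comment notes, it amortizes the spinlock cost by taking the PTE lock once per batch rather than once per page. It is typically called from a driver's ->mmap() handler to map an array of kernel-allocated pages. A minimal sketch of a caller follows; the demo_dev structure and its fields are hypothetical, and error handling is trimmed:

/* Hypothetical driver ->mmap() handler mapping a pre-allocated
 * page array into the user VMA with the batched API. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_dev *dev = file->private_data;	/* hypothetical */
	unsigned long num = dev->npages;		/* pages to map */
	int err;

	err = vm_insert_pages(vma, vma->vm_start, dev->pages, &num);
	/* On failure, num is updated to the count of pages that were
	 * not inserted, so the caller can unwind or retry. */
	return err;
}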
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index cb5c1fad1078..1fba072b3dac 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -66,7 +66,6 @@ static inline unsigned long pte_index(unsigned long address)
 {
 	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
-#define pte_index pte_index

 #ifndef pmd_index
 static inline unsigned long pmd_index(unsigned long address)
diff --git a/mm/memory.c b/mm/memory.c
index 2947fbc558f6..b7ce04cb058d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1870,7 +1870,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 }

-#ifdef pte_index
 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
@@ -1885,7 +1884,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 }

 /* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop. Arch *must* define pte_index.
+ * when inserting pages in a loop.
  */
 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1944,7 +1943,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	*num = remaining_pages_total;
 	return ret;
 }
-#endif /* ifdef pte_index */

 /**
  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1964,7 +1962,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num)
 {
-#ifdef pte_index
 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;

 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1976,18 +1973,6 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	}
 	/* Defer page refcount checking till we're about to map that page. */
 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
-#else
-	unsigned long idx = 0, pgcount = *num;
-	int err = -EINVAL;
-
-	for (; idx < pgcount; ++idx) {
-		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
-		if (err)
-			break;
-	}
-	*num = pgcount - idx;
-	return err;
-#endif /* ifdef pte_index */
 }
 EXPORT_SYMBOL(vm_insert_pages);
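As a concrete illustration of the index computation that the diff leaves in place, here is a standalone user-space rendering of the same arithmetic, assuming x86-64-style constants (PAGE_SHIFT == 12 for 4 KiB pages, PTRS_PER_PTE == 512); the kernel's version differs only in where the constants come from:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages (assumed) */
#define PTRS_PER_PTE	512	/* PTE entries per page table (assumed) */

/* Same expression as the kernel's generic pte_index(). */
static unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	/* 0x201000 is page number 0x201 (513); 513 & 511 == 1,
	 * so the address falls in slot 1 of its PTE table. */
	printf("%lu\n", pte_index(0x201000UL));	/* prints 1 */
	return 0;
}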
Since pte_index is always defined, we don't need to check whether it's
defined or not. Delete the slow version that doesn't depend on it and
remove the #define since nobody needs to test for it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Christian Dietrich <stettberger@dokucode.de>
---
 include/linux/pgtable.h |  1 -
 mm/memory.c             | 17 +----------------
 2 files changed, 1 insertion(+), 17 deletions(-)
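The deleted #define is an instance of a common kernel idiom: a generic header defines a macro with the same name as an inline function so that other code can probe for the function's existence with the preprocessor (an object-like macro expanding to its own name leaves calls such as pte_index(addr) untouched). A minimal sketch of how the pre-patch arrangement worked, with names abbreviated rather than the kernel's exact code:

/* Generic header: provide a default and advertise it to #ifdef. */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#define pte_index pte_index	/* marker: "pte_index is available" */

/* Elsewhere, callers could then select a fast path conditionally: */
#ifdef pte_index
	/* batched path using pte_index() directly */
#else
	/* slow per-page fallback */
#endif

With pte_index now defined unconditionally for every architecture, the probe serves no purpose, so both the marker #define and the fallback branch can be removed, which is exactly what this patch does.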