Message ID | 20241017141457.1169092-2-wangkefeng.wang@huawei.com
---|---
State | New
Series | [v2] mm: shmem: remove __shmem_huge_global_enabled()
On 17.10.24 16:14, Kefeng Wang wrote:
> Remove __shmem_huge_global_enabled() since only one caller,
> and remove repeated check of VM_NOHUGEPAGE/MMF_DISABLE_THP
> as they are checked in shmem_allowable_huge_orders(), also
> remove unnecessary vma parameter.
>
> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
> v2:
> - Add RB and send separately
>
>  mm/shmem.c | 33 ++++++++++-----------------------
>  1 file changed, 10 insertions(+), 23 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 247c0403af83..e933327d8dac 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -548,17 +548,15 @@ static bool shmem_confirm_swap(struct address_space *mapping,
>
>  static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
> -static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
> -                                        loff_t write_end, bool shmem_huge_force,
> -                                        struct vm_area_struct *vma,
> -                                        unsigned long vm_flags)
> +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
> +        loff_t write_end, bool shmem_huge_force,
> +        unsigned long vm_flags)

Please use double tabs on the second+ parameter list.

> {
> -        struct mm_struct *mm = vma ? vma->vm_mm : NULL;
>          loff_t i_size;
> -        if (!S_ISREG(inode->i_mode))
> +        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
>                  return false;
> -        if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))

But where is this check for shmem_getattr()?
On 2024/10/30 21:09, David Hildenbrand wrote:
> On 17.10.24 16:14, Kefeng Wang wrote:
>> Remove __shmem_huge_global_enabled() since only one caller,
>> and remove repeated check of VM_NOHUGEPAGE/MMF_DISABLE_THP
>> as they are checked in shmem_allowable_huge_orders(), also
>> remove unnecessary vma parameter.
>>
>> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>> ---
>> v2:
>> - Add RB and send separately
>>
>>  mm/shmem.c | 33 ++++++++++-----------------------
>>  1 file changed, 10 insertions(+), 23 deletions(-)
>>
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 247c0403af83..e933327d8dac 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -548,17 +548,15 @@ static bool shmem_confirm_swap(struct
>> address_space *mapping,
>>  static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
>> -static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t
>> index,
>> -        loff_t write_end, bool shmem_huge_force,
>> -        struct vm_area_struct *vma,
>> -        unsigned long vm_flags)
>> +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t
>> index,
>> +        loff_t write_end, bool shmem_huge_force,
>> +        unsigned long vm_flags)
>
> Please use double tabs on the second+ parameter list.

Ok, I will send a tab fix patch.

>
>> {
>> -        struct mm_struct *mm = vma ? vma->vm_mm : NULL;
>>          loff_t i_size;
>> -        if (!S_ISREG(inode->i_mode))
>> +        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
>>                  return false;
>> -        if (mm && ((vm_flags & VM_NOHUGEPAGE) ||
>> test_bit(MMF_DISABLE_THP, &mm->flags)))
>
> But where is this check for shmem_getattr()?

In shmem_getattr(), the vma=NULL/vm_flags=0, so only vm_flags check is
enough.

-        if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+        if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
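To make the shmem_getattr() argument above concrete, here is a small standalone model. It is not kernel code: the flag bits and the mmf_disable_thp field are invented stand-ins for VM_NOHUGEPAGE/VM_HUGEPAGE and test_bit(MMF_DISABLE_THP, &mm->flags). With vma == NULL the old guard could never reject, and with vm_flags == 0 the remaining SHMEM_HUGE_ADVISE test is simply false, so dropping the guard cannot change the stat result.

/*
 * Toy model (not kernel code): why dropping the
 * "mm && ((vm_flags & VM_NOHUGEPAGE) || MMF_DISABLE_THP)" guard cannot
 * change the shmem_getattr() path, which passes vma = NULL / vm_flags = 0.
 * The flag bits below are illustrative stand-ins, not the kernel values.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_NOHUGEPAGE (1UL << 0)
#define VM_HUGEPAGE   (1UL << 1)

/* Old guard: only meaningful when an mm (i.e. a vma) was supplied. */
static bool old_guard_rejects(bool have_mm, unsigned long vm_flags,
                              bool mmf_disable_thp)
{
        return have_mm &&
               ((vm_flags & VM_NOHUGEPAGE) || mmf_disable_thp);
}

/* SHMEM_HUGE_ADVISE test after the patch: just look at vm_flags. */
static bool advise_allows(unsigned long vm_flags)
{
        return vm_flags & VM_HUGEPAGE;
}

int main(void)
{
        /* shmem_getattr(): vma == NULL, so have_mm = false and vm_flags = 0. */
        printf("old guard rejects: %d\n", old_guard_rejects(false, 0, false));
        printf("ADVISE allows:     %d\n", advise_allows(0));
        /* Both print 0: the removed guard was a no-op on this call path. */
        return 0;
}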
On 01.11.24 02:07, Kefeng Wang wrote:
>
>
> On 2024/10/30 21:09, David Hildenbrand wrote:
>> On 17.10.24 16:14, Kefeng Wang wrote:
>>> Remove __shmem_huge_global_enabled() since only one caller,
>>> and remove repeated check of VM_NOHUGEPAGE/MMF_DISABLE_THP
>>> as they are checked in shmem_allowable_huge_orders(), also
>>> remove unnecessary vma parameter.
>>>
>>> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>>> ---
>>> v2:
>>> - Add RB and send separately
>>>
>>>  mm/shmem.c | 33 ++++++++++-----------------------
>>>  1 file changed, 10 insertions(+), 23 deletions(-)
>>>
>>> diff --git a/mm/shmem.c b/mm/shmem.c
>>> index 247c0403af83..e933327d8dac 100644
>>> --- a/mm/shmem.c
>>> +++ b/mm/shmem.c
>>> @@ -548,17 +548,15 @@ static bool shmem_confirm_swap(struct
>>> address_space *mapping,
>>>  static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
>>> -static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t
>>> index,
>>> -        loff_t write_end, bool shmem_huge_force,
>>> -        struct vm_area_struct *vma,
>>> -        unsigned long vm_flags)
>>> +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t
>>> index,
>>> +        loff_t write_end, bool shmem_huge_force,
>>> +        unsigned long vm_flags)
>>
>> Please use double tabs on the second+ parameter list.
>
> Ok, I will send a tab fix patch.
>
>>
>>> {
>>> -        struct mm_struct *mm = vma ? vma->vm_mm : NULL;
>>>          loff_t i_size;
>>> -        if (!S_ISREG(inode->i_mode))
>>> +        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
>>>                  return false;
>>> -        if (mm && ((vm_flags & VM_NOHUGEPAGE) ||
>>> test_bit(MMF_DISABLE_THP, &mm->flags)))
>>
>> But where is this check for shmem_getattr()?
>
>
> In shmem_getattr(), the vma=NULL/vm_flags=0, so only vm_flags check is
> enough.
>
> -        if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
> +        if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
>

Ah, missed that, makes perfect sense!

Acked-by: David Hildenbrand <david@redhat.com>
diff --git a/mm/shmem.c b/mm/shmem.c
index 247c0403af83..e933327d8dac 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -548,17 +548,15 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                                        loff_t write_end, bool shmem_huge_force,
-                                        struct vm_area_struct *vma,
-                                        unsigned long vm_flags)
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+        loff_t write_end, bool shmem_huge_force,
+        unsigned long vm_flags)
 {
-        struct mm_struct *mm = vma ? vma->vm_mm : NULL;
         loff_t i_size;
 
-        if (!S_ISREG(inode->i_mode))
+        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
                 return false;
-        if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
+        if (!S_ISREG(inode->i_mode))
                 return false;
         if (shmem_huge == SHMEM_HUGE_DENY)
                 return false;
@@ -576,7 +574,7 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
                         return true;
                 fallthrough;
         case SHMEM_HUGE_ADVISE:
-                if (mm && (vm_flags & VM_HUGEPAGE))
+                if (vm_flags & VM_HUGEPAGE)
                         return true;
                 fallthrough;
         default:
@@ -584,17 +582,6 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
                 return false;
         }
 }
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                loff_t write_end, bool shmem_huge_force,
-                struct vm_area_struct *vma, unsigned long vm_flags)
-{
-        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
-                return false;
-
-        return __shmem_huge_global_enabled(inode, index, write_end,
-                        shmem_huge_force, vma, vm_flags);
-}
-
 #if defined(CONFIG_SYSFS)
 static int shmem_parse_huge(const char *str)
 {
@@ -772,8 +759,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 }
 
 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-                loff_t write_end, bool shmem_huge_force,
-                struct vm_area_struct *vma, unsigned long vm_flags)
+                loff_t write_end, bool shmem_huge_force,
+                unsigned long vm_flags)
 {
         return false;
 }
@@ -1170,7 +1157,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
         generic_fillattr(idmap, request_mask, inode, stat);
         inode_unlock_shared(inode);
 
-        if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+        if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
                 stat->blksize = HPAGE_PMD_SIZE;
 
         if (request_mask & STATX_BTIME) {
@@ -1686,7 +1673,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
                 return 0;
 
         global_huge = shmem_huge_global_enabled(inode, index, write_end,
-                                                shmem_huge_force, vma, vm_flags);
+                                                shmem_huge_force, vm_flags);
         if (!vma || !vma_is_anon_shmem(vma)) {
                 /*
                  * For tmpfs, we now only support PMD sized THP if huge page
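As for the commit message's point that the VM_NOHUGEPAGE/MMF_DISABLE_THP test was repeated: the fault-side caller, shmem_allowable_huge_orders(), already bails out for such mappings before it consults the global policy helper (in kernels of this vintage that check sits behind helpers along the lines of thp_disabled_by_hw()/vma_thp_disabled(); treat the exact names as an approximation). Below is a minimal standalone sketch of that ordering, with made-up flag values and simplified signatures, not the real kernel functions.

/*
 * Toy model (not kernel code) of the ordering the commit message relies on:
 * the per-mapping / per-process disable is handled in the caller before the
 * global knob is consulted, so repeating it in the global helper added
 * nothing. Flag bits, field names and the order-9 "PMD" bit are invented
 * stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_NOHUGEPAGE (1UL << 0)
#define VM_HUGEPAGE   (1UL << 1)

struct toy_vma {
        unsigned long vm_flags;
        bool mmf_disable_thp;   /* stands in for test_bit(MMF_DISABLE_THP, ...) */
};

/* Global sysfs policy, reduced to an "advise"-style check for the demo. */
static bool toy_global_enabled(unsigned long vm_flags)
{
        return vm_flags & VM_HUGEPAGE;
}

static unsigned long toy_allowable_huge_orders(const struct toy_vma *vma)
{
        unsigned long vm_flags = vma ? vma->vm_flags : 0;

        /* Per-mapping / per-process disable is handled here, first ... */
        if (vma && ((vm_flags & VM_NOHUGEPAGE) || vma->mmf_disable_thp))
                return 0;

        /* ... so the global helper no longer needs to repeat that test. */
        return toy_global_enabled(vm_flags) ? 1UL << 9 : 0;
}

int main(void)
{
        struct toy_vma nohuge = { .vm_flags = VM_NOHUGEPAGE | VM_HUGEPAGE };
        struct toy_vma advise = { .vm_flags = VM_HUGEPAGE };

        printf("VM_NOHUGEPAGE vma -> orders 0x%lx\n", toy_allowable_huge_orders(&nohuge));
        printf("VM_HUGEPAGE vma   -> orders 0x%lx\n", toy_allowable_huge_orders(&advise));
        return 0;
}

Because the per-mapping rejection happens before the global helper is ever reached, the duplicate guard inside it was dead weight, which is exactly what the patch removes.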