Message ID | 20241011102445.934409-2-david@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | mm: don't install PMD mappings when THPs are disabled by the hw/process/vma | expand |
On 11/10/2024 11:24, David Hildenbrand wrote: > From: Kefeng Wang <wangkefeng.wang@huawei.com> > > Add vma_thp_disabled() and thp_disabled_by_hw() helpers to be shared by > shmem_allowable_huge_orders() and __thp_vma_allowable_orders(). > > Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> > [ rename to vma_thp_disabled(), split out thp_disabled_by_hw() ] > Signed-off-by: David Hildenbrand <david@redhat.com> Looks like a nice tidy up on its own: Reviewed-by: Ryan Roberts <ryan.roberts@arm.com> > --- > include/linux/huge_mm.h | 18 ++++++++++++++++++ > mm/huge_memory.c | 13 +------------ > mm/shmem.c | 7 +------ > 3 files changed, 20 insertions(+), 18 deletions(-) > > diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h > index 67d0ab3c3bba..ef5b80e48599 100644 > --- a/include/linux/huge_mm.h > +++ b/include/linux/huge_mm.h > @@ -322,6 +322,24 @@ struct thpsize { > (transparent_hugepage_flags & \ > (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) > > +static inline bool vma_thp_disabled(struct vm_area_struct *vma, > + unsigned long vm_flags) > +{ > + /* > + * Explicitly disabled through madvise or prctl, or some > + * architectures may disable THP for some mappings, for > + * example, s390 kvm. > + */ > + return (vm_flags & VM_NOHUGEPAGE) || > + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags); > +} > + > +static inline bool thp_disabled_by_hw(void) > +{ > + /* If the hardware/firmware marked hugepage support disabled. */ > + return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED); > +} > + > unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, > unsigned long len, unsigned long pgoff, unsigned long flags); > unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index 87b49ecc7b1e..2fb328880b50 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -109,18 +109,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, > if (!vma->vm_mm) /* vdso */ > return 0; > > - /* - * Explicitly disabled through madvise or prctl, or some > - * architectures may disable THP for some mappings, for > - * example, s390 kvm. > - * */ > - if ((vm_flags & VM_NOHUGEPAGE) || > - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) > - return 0; > - /* > - * If the hardware/firmware marked hugepage support disabled. > - */ > - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) > + if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags)) > return 0; > > /* khugepaged doesn't collapse DAX vma, but page fault is fine. */ > diff --git a/mm/shmem.c b/mm/shmem.c > index 4f11b5506363..c5adb987b23c 100644 > --- a/mm/shmem.c > +++ b/mm/shmem.c > @@ -1664,12 +1664,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, > loff_t i_size; > int order; > > - if (vma && ((vm_flags & VM_NOHUGEPAGE) || > - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))) > - return 0; > - > - /* If the hardware/firmware marked hugepage support disabled. */ > - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) > + if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags))) > return 0; > > global_huge = shmem_huge_global_enabled(inode, index, write_end,
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 67d0ab3c3bba..ef5b80e48599 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -322,6 +322,24 @@ struct thpsize { (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) +static inline bool vma_thp_disabled(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + /* + * Explicitly disabled through madvise or prctl, or some + * architectures may disable THP for some mappings, for + * example, s390 kvm. + */ + return (vm_flags & VM_NOHUGEPAGE) || + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags); +} + +static inline bool thp_disabled_by_hw(void) +{ + /* If the hardware/firmware marked hugepage support disabled. */ + return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED); +} + unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 87b49ecc7b1e..2fb328880b50 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -109,18 +109,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, if (!vma->vm_mm) /* vdso */ return 0; - /* - * Explicitly disabled through madvise or prctl, or some - * architectures may disable THP for some mappings, for - * example, s390 kvm. - * */ - if ((vm_flags & VM_NOHUGEPAGE) || - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) - return 0; - /* - * If the hardware/firmware marked hugepage support disabled. - */ - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) + if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags)) return 0; /* khugepaged doesn't collapse DAX vma, but page fault is fine. */ diff --git a/mm/shmem.c b/mm/shmem.c index 4f11b5506363..c5adb987b23c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1664,12 +1664,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, loff_t i_size; int order; - if (vma && ((vm_flags & VM_NOHUGEPAGE) || - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))) - return 0; - - /* If the hardware/firmware marked hugepage support disabled. */ - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) + if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags))) return 0; global_huge = shmem_huge_global_enabled(inode, index, write_end,