Message ID | 20220824175757.20590-6-mike.kravetz@oracle.com (mailing list archive)
---|---
State | New
Series | hugetlb: Use new vma mutex for huge pmd sharing synchronization
On 2022/8/25 1:57, Mike Kravetz wrote:
> Rename the routine vma_shareable to vma_addr_pmd_shareable as it is
> checking a specific address within the vma. Refactor code to check if
> an aligned range is shareable as this will be needed in a subsequent
> patch.
>
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>

LGTM. Thanks.

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

Thanks,
Miaohe Lin

> ---
>  mm/hugetlb.c | 19 +++++++++++++------
>  1 file changed, 13 insertions(+), 6 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index a6eb46c64baf..758b6844d566 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6648,26 +6648,33 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
>  	return saddr;
>  }
>
> -static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
> +static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
> +				unsigned long start, unsigned long end)
>  {
> -	unsigned long base = addr & PUD_MASK;
> -	unsigned long end = base + PUD_SIZE;
> -
>  	/*
>  	 * check on proper vm_flags and page table alignment
>  	 */
> -	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
> +	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, start, end))
>  		return true;
>  	return false;
>  }
>
> +static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
> +					unsigned long addr)
> +{
> +	unsigned long start = addr & PUD_MASK;
> +	unsigned long end = start + PUD_SIZE;
> +
> +	return __vma_aligned_range_pmd_shareable(vma, start, end);
> +}
> +
>  bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
>  {
>  #ifdef CONFIG_USERFAULTFD
>  	if (uffd_disable_huge_pmd_share(vma))
>  		return false;
>  #endif
> -	return vma_shareable(vma, addr);
> +	return vma_addr_pmd_shareable(vma, addr);
>  }
>
>  /*
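The arithmetic at the heart of the renamed helper is plain address masking: round addr down to a PUD boundary and take one PUD-sized range from there. A minimal userspace sketch of just that step, assuming the x86_64/4K-page value of PUD_SIZE (1 GiB); PUD_SHIFT, PUD_SIZE, and PUD_MASK are kernel macros, re-derived here only for illustration:

#include <stdio.h>

/* Illustrative stand-ins: on x86_64 with 4K pages a PUD covers 1 GiB. */
#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x7f3a4c2d1000UL;	/* arbitrary user address */

	/* The same computation vma_addr_pmd_shareable() performs before
	 * handing the range to __vma_aligned_range_pmd_shareable(). */
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

	printf("start = %#lx\n", start);	/* 0x7f3a40000000 */
	printf("end   = %#lx\n", end);		/* 0x7f3a80000000 */
	return 0;
}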
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a6eb46c64baf..758b6844d566 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6648,26 +6648,33 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	return saddr;
 }
 
-static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
 {
-	unsigned long base = addr & PUD_MASK;
-	unsigned long end = base + PUD_SIZE;
-
 	/*
 	 * check on proper vm_flags and page table alignment
 	 */
-	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
+	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, start, end))
 		return true;
 	return false;
 }
 
+static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
+					unsigned long addr)
+{
+	unsigned long start = addr & PUD_MASK;
+	unsigned long end = start + PUD_SIZE;
+
+	return __vma_aligned_range_pmd_shareable(vma, start, end);
+}
+
 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
 #ifdef CONFIG_USERFAULTFD
 	if (uffd_disable_huge_pmd_share(vma))
 		return false;
 #endif
-	return vma_shareable(vma, addr);
+	return vma_addr_pmd_shareable(vma, addr);
 }
 
 /*
Rename the routine vma_shareable to vma_addr_pmd_shareable as it is
checking a specific address within the vma. Refactor code to check if
an aligned range is shareable as this will be needed in a subsequent
patch.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
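To see the shape of the refactor outside the kernel tree, the two helpers can be modeled as ordinary C. Here struct vm_area_struct, range_in_vma(), VM_MAYSHARE, and the PUD constants are simplified stand-ins for the real kernel definitions, not the kernel code itself:

#include <stdbool.h>
#include <stdio.h>

#define VM_MAYSHARE	0x00000080UL	/* stand-in flag value */
#define PUD_SIZE	(1UL << 30)	/* illustrative: 1 GiB */
#define PUD_MASK	(~(PUD_SIZE - 1))

/* Simplified stand-in for the kernel's struct vm_area_struct. */
struct vm_area_struct {
	unsigned long vm_start, vm_end, vm_flags;
};

/* Simplified stand-in for the kernel's range_in_vma(). */
static bool range_in_vma(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	return vma->vm_start <= start && end <= vma->vm_end;
}

/* Mirrors the patch: the worker takes an explicit, already-aligned
 * range, so a later caller can test any such range directly. */
static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma->vm_flags & VM_MAYSHARE) && range_in_vma(vma, start, end);
}

/* Mirrors the patch: the address-based wrapper derives the PUD-aligned
 * range containing addr, then defers to the worker. */
static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

	return __vma_aligned_range_pmd_shareable(vma, start, end);
}

int main(void)
{
	struct vm_area_struct vma = {
		.vm_start = 0x40000000UL,	/* PUD-aligned */
		.vm_end   = 0xb0000000UL,	/* not PUD-aligned */
		.vm_flags = VM_MAYSHARE,
	};

	/* PUD range 0x40000000-0x80000000 lies fully inside the vma. */
	printf("%d\n", vma_addr_pmd_shareable(&vma, 0x50000000UL)); /* 1 */
	/* addr is inside the vma, but its PUD range 0x80000000-0xc0000000
	 * runs past vm_end, so sharing is refused. */
	printf("%d\n", vma_addr_pmd_shareable(&vma, 0xa0000000UL)); /* 0 */
	return 0;
}

Splitting the check this way means the subsequent patch the commit message mentions can validate an arbitrary aligned range without duplicating the VM_MAYSHARE-plus-coverage test.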