--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -479,7 +479,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
struct vm_area_struct *vma)
{
if (radix_enabled())
- return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+ return (new_pmd_ptl != old_pmd_ptl) && vma_is_private_anon(vma);
return true;
}
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1269,7 +1269,7 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
return false;
}
- return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
+ return vma_is_private_anon(vma) || is_vm_hugetlb_page(vma) ||
vma_is_shmem(vma);
}
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -119,7 +119,7 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
unsigned long haddr)
{
/* Don't have to check pgoff for anonymous vma */
- if (!vma_is_anonymous(vma)) {
+ if (!vma_is_private_anon(vma)) {
if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
HPAGE_PMD_NR))
return false;
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
vma->vm_ops = NULL;
}
-static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+static inline bool vma_is_private_anon(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -398,7 +398,7 @@ static struct page *no_page_table(struct vm_area_struct *vma,
* be zero-filled if handle_mm_fault() actually did handle it.
*/
if ((flags & FOLL_DUMP) &&
- (vma_is_anonymous(vma) || !vma->vm_ops->fault))
+ (vma_is_private_anon(vma) || !vma->vm_ops->fault))
return ERR_PTR(-EFAULT);
return NULL;
}
@@ -913,7 +913,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
- if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+ if (gup_flags & FOLL_ANON && !vma_is_private_anon(vma))
return -EFAULT;
if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -82,7 +82,7 @@ bool transparent_hugepage_active(struct vm_area_struct *vma)
if (!transhuge_vma_suitable(vma, addr))
return false;
- if (vma_is_anonymous(vma))
+ if (vma_is_private_anon(vma))
return __transparent_hugepage_enabled(vma);
if (vma_is_shmem(vma))
return shmem_huge_enabled(vma);
@@ -1035,7 +1035,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
int ret = -ENOMEM;
/* Skip if can be re-fill on fault */
- if (!vma_is_anonymous(dst_vma))
+ if (!vma_is_private_anon(dst_vma))
return 0;
pgtable = pte_alloc_one(dst_mm);
@@ -1622,7 +1622,7 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
*
* We also don't deposit and withdraw tables for file pages.
*/
- return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+ return (new_pmd_ptl != old_pmd_ptl) && vma_is_private_anon(vma);
}
#endif
@@ -1799,7 +1799,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
}
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
- BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+ BUG_ON(vma_is_private_anon(vma) && !preserve_write && pmd_write(entry));
unlock:
spin_unlock(ptl);
return ret;
@@ -1957,7 +1957,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
count_vm_event(THP_SPLIT_PMD);
- if (!vma_is_anonymous(vma)) {
+ if (!vma_is_private_anon(vma)) {
old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
/*
* We are going to unmap this huge page. So
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -543,7 +543,7 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
static inline bool can_do_pageout(struct vm_area_struct *vma)
{
- if (vma_is_anonymous(vma))
+ if (vma_is_private_anon(vma))
return true;
if (!vma->vm_file)
return false;
@@ -725,7 +725,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
struct mmu_gather tlb;
/* MADV_FREE works for only anon vma at the moment */
- if (!vma_is_anonymous(vma))
+ if (!vma_is_private_anon(vma))
return -EINVAL;
range.start = max(vma->vm_start, start_addr);
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4494,7 +4494,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
{
- if (vma_is_anonymous(vmf->vma))
+ if (vma_is_private_anon(vmf->vma))
return do_huge_pmd_anonymous_page(vmf);
if (vmf->vma->vm_ops->huge_fault)
return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
@@ -4504,7 +4504,7 @@ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
/* `inline' is required to avoid gcc 4.1.2 build error */
static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
{
- if (vma_is_anonymous(vmf->vma)) {
+ if (vma_is_private_anon(vmf->vma)) {
if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
return handle_userfault(vmf, VM_UFFD_WP);
return do_huge_pmd_wp_page(vmf);
@@ -4527,7 +4527,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
/* No support for anonymous transparent PUD pages yet */
- if (vma_is_anonymous(vmf->vma))
+ if (vma_is_private_anon(vmf->vma))
goto split;
if (vmf->vma->vm_ops->huge_fault) {
vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
@@ -4546,7 +4546,7 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* No support for anonymous transparent PUD pages yet */
- if (vma_is_anonymous(vmf->vma))
+ if (vma_is_private_anon(vmf->vma))
return VM_FAULT_FALLBACK;
if (vmf->vma->vm_ops->huge_fault)
return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
@@ -4621,7 +4621,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
}
if (!vmf->pte) {
- if (vma_is_anonymous(vmf->vma))
+ if (vma_is_private_anon(vmf->vma))
return do_anonymous_page(vmf);
else
return do_fault(vmf);
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -40,7 +40,7 @@ static int migrate_vma_collect_hole(unsigned long start,
unsigned long addr;
/* Only allow populating anonymous memory. */
- if (!vma_is_anonymous(walk->vma))
+ if (!vma_is_private_anon(walk->vma))
return migrate_vma_collect_skip(start, end, walk);
for (addr = start; addr < end; addr += PAGE_SIZE) {
@@ -120,7 +120,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
pte = *ptep;
if (pte_none(pte)) {
- if (vma_is_anonymous(vma)) {
+ if (vma_is_private_anon(vma)) {
mpfn = MIGRATE_PFN_MIGRATE;
migrate->cpages++;
}
@@ -518,7 +518,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
pte_t *ptep;
/* Only allow populating anonymous memory */
- if (!vma_is_anonymous(vma))
+ if (!vma_is_private_anon(vma))
goto abort;
pgdp = pgd_offset(mm, addr);
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -156,7 +156,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
static inline bool can_do_mincore(struct vm_area_struct *vma)
{
- if (vma_is_anonymous(vma))
+ if (vma_is_private_anon(vma))
return true;
if (!vma->vm_file)
return false;
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3189,7 +3189,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
* using the existing file pgoff checks and manipulations.
* Similarly in do_mmap and in do_brk_flags.
*/
- if (vma_is_anonymous(vma)) {
+ if (vma_is_private_anon(vma)) {
BUG_ON(vma->anon_vma);
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
}
@@ -3217,7 +3217,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
*/
- if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
+ if (unlikely(vma_is_private_anon(vma) && !vma->anon_vma)) {
pgoff = addr >> PAGE_SHIFT;
faulted_in_anon_vma = false;
}
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -536,7 +536,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
* we do not want to block exit_mmap by keeping mm ref
* count elevated without a good reason.
*/
- if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+ if (vma_is_private_anon(vma) || !(vma->vm_flags & VM_SHARED)) {
struct mmu_notifier_range range;
struct mmu_gather tlb;
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -540,9 +540,9 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
err = -EINVAL;
/*
* shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
- * it will overwrite vm_ops, so vma_is_anonymous must return false.
+ * it will overwrite vm_ops, so vma_is_private_anon must return false.
*/
- if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
+ if (WARN_ON_ONCE(vma_is_private_anon(dst_vma) &&
dst_vma->vm_flags & VM_SHARED))
goto out_unlock;
@@ -561,7 +561,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
src_start, len, mcopy_mode);
- if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+ if (!vma_is_private_anon(dst_vma) && !vma_is_shmem(dst_vma))
goto out_unlock;
if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
goto out_unlock;
@@ -717,7 +717,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
goto out_unlock;
if (!userfaultfd_wp(dst_vma))
goto out_unlock;
- if (!vma_is_anonymous(dst_vma))
+ if (!vma_is_private_anon(dst_vma))
goto out_unlock;
if (enable_wp)
The vma_is_anonymous function isn't fully indicative of what it checks. Without having full knowledge of the mmap process, one may incorrectly assume this covers all types of anonymous memory; which is not the case. No functional changes. Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: Hari Bathini <hbathini@linux.ibm.com> Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org> Cc: Yang Shi <shy828301@gmail.com> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: William Kucharski <william.kucharski@oracle.com> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Nico Pache <npache@redhat.com> --- arch/powerpc/mm/book3s64/pgtable.c | 2 +- fs/userfaultfd.c | 2 +- include/linux/huge_mm.h | 2 +- include/linux/mm.h | 2 +- mm/gup.c | 4 ++-- mm/huge_memory.c | 10 +++++----- mm/madvise.c | 4 ++-- mm/memory.c | 10 +++++----- mm/migrate_device.c | 6 +++--- mm/mincore.c | 2 +- mm/mmap.c | 4 ++-- mm/oom_kill.c | 2 +- mm/userfaultfd.c | 8 ++++---- 13 files changed, 29 insertions(+), 29 deletions(-)