@@ -316,7 +316,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
gate_vma.vm_start = 0xffff0000;
gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
- gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+ init_vm_flags(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
return 0;
}
arch_initcall(gate_vma_init);
@@ -109,7 +109,7 @@ ia64_init_addr_space (void)
vma_set_anonymous(vma);
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
- vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+ init_vm_flags(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
@@ -127,8 +127,8 @@ ia64_init_addr_space (void)
vma_set_anonymous(vma);
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
- vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
- VM_DONTEXPAND | VM_DONTDUMP;
+ init_vm_flags(vma, VM_READ | VM_MAYREAD | VM_IO |
+ VM_DONTEXPAND | VM_DONTDUMP);
mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
mmap_write_unlock(current->mm);
@@ -272,7 +272,7 @@ static int __init gate_vma_init(void)
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+ init_vm_flags(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
@@ -149,7 +149,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
struct vm_area_struct vma;
vma.vm_mm = tlb->mm;
- vma.vm_flags = 0;
+ init_vm_flags(&vma, 0);
if (tlb->fullmm) {
flush_tlb_mm(tlb->mm);
return;
@@ -324,7 +324,7 @@ static int kvmppc_xive_native_mmap(struct kvm_device *dev,
return -EINVAL;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
/*
@@ -156,7 +156,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
* VM_NOHUGEPAGE and split them.
*/
for_each_vma_range(vmi, vma, addr + len) {
- vma->vm_flags |= VM_NOHUGEPAGE;
+ set_vm_flags(vma, VM_NOHUGEPAGE);
walk_page_vma(vma, &subpage_walk_ops, NULL);
}
}
@@ -525,7 +525,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
pfn = paste_addr >> PAGE_SHIFT;
/* flags, page_prot from cxl_mmap(), except we want cachable */
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
@@ -291,7 +291,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma->vm_ops = &spufs_mem_mmap_vmops;
@@ -381,7 +381,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_cntl_mmap_vmops;
@@ -1043,7 +1043,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal1_mmap_vmops;
@@ -1179,7 +1179,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal2_mmap_vmops;
@@ -1302,7 +1302,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mss_mmap_vmops;
@@ -1364,7 +1364,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_psmap_mmap_vmops;
@@ -1424,7 +1424,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mfc_mmap_vmops;
@@ -2522,8 +2522,7 @@ static inline void thp_split_mm(struct mm_struct *mm)
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma) {
- vma->vm_flags &= ~VM_HUGEPAGE;
- vma->vm_flags |= VM_NOHUGEPAGE;
+ mod_vm_flags(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
walk_page_vma(vma, &thp_split_walk_ops, NULL);
}
mm->def_flags |= VM_NOHUGEPAGE;
@@ -391,7 +391,7 @@ void __init map_vsyscall(void)
}
if (vsyscall_mode == XONLY)
- gate_vma.vm_flags = VM_EXEC;
+ init_vm_flags(&gate_vma, VM_EXEC);
BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
(unsigned long)VSYSCALL_ADDR);
@@ -95,7 +95,7 @@ static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
vma->vm_ops = &sgx_vm_ops;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
vma->vm_private_data = encl;
return 0;
@@ -105,7 +105,7 @@ static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &sgx_vepc_vm_ops;
/* Don't copy VMA in fork() */
- vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
+ set_vm_flags(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY);
vma->vm_private_data = vepc;
return 0;
@@ -999,7 +999,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
ret = reserve_pfn_range(paddr, size, prot, 0);
if (ret == 0 && vma)
- vma->vm_flags |= VM_PAT;
+ set_vm_flags(vma, VM_PAT);
return ret;
}
@@ -1065,7 +1065,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
}
free_pfn_range(paddr, size);
if (vma)
- vma->vm_flags &= ~VM_PAT;
+ clear_vm_flags(vma, VM_PAT);
}
/*
@@ -1075,7 +1075,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
*/
void untrack_pfn_moved(struct vm_area_struct *vma)
{
- vma->vm_flags &= ~VM_PAT;
+ clear_vm_flags(vma, VM_PAT);
}
pgprot_t pgprot_writecombine(pgprot_t prot)
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+ init_vm_flags(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
gate_vma.vm_page_prot = PAGE_READONLY;
return 0;
@@ -310,7 +310,7 @@ pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
return -EROFS;
/* changing from read to write with mprotect is not allowed */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
pfrt_log_dev = to_pfrt_log_dev(file);
@@ -5572,8 +5572,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
return -EPERM;
}
- vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
- vma->vm_flags &= ~VM_MAYWRITE;
+ mod_vm_flags(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
@@ -206,7 +206,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
refcount_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
if (vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
@@ -2363,7 +2363,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
return -EINVAL;
}
- vma->vm_flags |= VM_IO;
+ set_vm_flags(vma, VM_IO);
return remap_pfn_range(vma, vma->vm_start,
phys_base >> PAGE_SHIFT,
@@ -308,7 +308,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
vma->vm_ops = &dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
return 0;
}
@@ -201,7 +201,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;
- vma->vm_flags |= VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTCOPY);
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -257,7 +257,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
*/
if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & VM_ACCESS_FLAGS))
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
return drm_gem_ttm_mmap(obj, vma);
}
@@ -2879,8 +2879,8 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = dev->adev->rmmio_remap.bus_addr;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -159,8 +159,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1052,8 +1052,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
pfn = __pa(page->kernel_address);
pfn >>= PAGE_SHIFT;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
- | VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+ | VM_DONTDUMP | VM_PFNMAP);
pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
@@ -1978,8 +1978,8 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
return -ENOMEM;
}
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
- | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+ | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),
@@ -1047,7 +1047,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
goto err_drm_gem_object_put;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
@@ -530,8 +530,7 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
* the whole buffer.
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_DONTEXPAND;
+ mod_vm_flags(vma, VM_DONTEXPAND, VM_PFNMAP);
if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -633,7 +633,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
if (ret)
return ret;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -476,7 +476,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ clear_vm_flags(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@@ -492,7 +492,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_dma_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
@@ -560,7 +560,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ clear_vm_flags(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@@ -628,7 +628,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
default:
return -EINVAL; /* This should never happen. */
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
@@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
{
pgprot_t vm_page_prot;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -274,7 +274,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
unsigned long vm_size;
int ret;
- vma->vm_flags &= ~VM_PFNMAP;
+ clear_vm_flags(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
@@ -368,7 +368,7 @@ static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct
if (obj->import_attach)
return dma_buf_mmap(obj->dma_buf, vma, 0);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);
@@ -139,7 +139,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
*/
vma->vm_ops = &psbfb_vm_ops;
vma->vm_private_data = (void *)fb;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
@@ -102,7 +102,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
- vma->vm_flags |= VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTCOPY);
buf_priv->currently_mapped = I810_BUF_MAPPED;
@@ -979,7 +979,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
i915_gem_object_put(obj);
return -EINVAL;
}
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
anon = mmap_singleton(to_i915(dev));
@@ -988,7 +988,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return PTR_ERR(anon);
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
/*
* We keep the ref on mmo->obj, not vm_file, but we require
@@ -158,7 +158,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -1012,7 +1012,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
@@ -543,8 +543,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
+ mod_vm_flags(vma, VM_MIXEDMAP, VM_PFNMAP);
if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
@@ -251,8 +251,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
* We allocated a struct page table for rk_obj, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_PFNMAP;
+ mod_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -574,7 +574,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
- vma->vm_flags &= ~VM_PFNMAP;
+ clear_vm_flags(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
@@ -588,8 +588,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
} else {
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
+ mod_vm_flags(vma, VM_MIXEDMAP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(prot);
}
@@ -468,8 +468,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_private_data = bo;
- vma->vm_flags |= VM_PFNMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
@@ -46,7 +46,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return -EINVAL;
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &virtio_gpu_vram_vm_ops;
@@ -97,7 +97,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
if (!is_cow_mapping(vma->vm_flags))
- vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+ mod_vm_flags(vma, VM_PFNMAP, VM_MIXEDMAP);
ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
@@ -69,8 +69,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ mod_vm_flags(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
vma->vm_pgoff = 0;
/*
@@ -1264,7 +1264,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_pages(vma) != 1)
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_ops = &cs_char_vm_ops;
vma->vm_private_data = file->private_data;
@@ -1659,7 +1659,7 @@ static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
atomic_dec(&msc->user_count);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTCOPY);
vma->vm_ops = &msc_mmap_ops;
return ret;
}
@@ -715,7 +715,7 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
pm_runtime_get_sync(&stm->dev);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);
@@ -403,7 +403,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EPERM;
goto done;
}
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
addr = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
memlen = uctxt->egrbufs.buffers[i].len;
@@ -528,7 +528,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}
- vma->vm_flags = flags;
+ reset_vm_flags(vma, flags);
hfi1_cdbg(PROC,
"%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
@@ -2087,7 +2087,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
if (!dev->mdev->clock_info)
return -EOPNOTSUPP;
@@ -2311,7 +2311,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
/* Don't expose to user-space information it shouldn't have */
if (PAGE_SIZE > 4096)
@@ -733,7 +733,7 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
}
/* don't allow them to later change with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
@@ -769,7 +769,7 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
phys = dd->physaddr + ureg;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND);
ret = io_remap_pfn_range(vma, vma->vm_start,
phys >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
@@ -810,8 +810,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
* don't allow them to later change to readable with mprotect (for when
* not initially mapped readable, as is normally the case)
*/
- vma->vm_flags &= ~VM_MAYREAD;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ mod_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
/* We used PAT if wc_cookie == 0 */
if (!dd->wc_cookie)
@@ -852,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
goto bail;
}
/* don't allow them to later change to writable with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
start = vma->vm_start;
@@ -944,7 +943,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
* Don't allow permission to later change to writable
* with mprotect.
*/
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
} else
goto bail;
len = vma->vm_end - vma->vm_start;
@@ -955,7 +954,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
ret = 1;
bail:
@@ -672,7 +672,7 @@ int usnic_ib_mmap(struct ib_ucontext *context,
usnic_dbg("\n");
us_ibdev = to_usdev(context->device);
- vma->vm_flags |= VM_IO;
+ set_vm_flags(vma, VM_IO);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vfid = vma->vm_pgoff;
usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
@@ -408,7 +408,7 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
}
/* Map UAR to kernel space, VM_LOCKED? */
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
vma->vm_page_prot))
@@ -293,7 +293,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return ret;
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = &buf->handler;
vma->vm_ops = &vb2_common_vm_ops;
@@ -185,7 +185,7 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
/*
* Make sure that vm_areas for 2 buffers won't be merged together
*/
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
/*
* Use common vm_area operations to track buffer refcount.
@@ -314,7 +314,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
}
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
@@ -630,8 +630,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
map->count = 1;
map->q = q;
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+ /* using shared anonymous pages */
+ mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
@@ -247,7 +247,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
}
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
@@ -220,7 +220,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &cxl_mmap_vmops;
return 0;
@@ -2082,7 +2082,7 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, v
{
struct hl_ts_buff *ts_buff = buf->private;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}
@@ -4236,8 +4236,8 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
@@ -5538,8 +5538,8 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
@@ -10116,8 +10116,8 @@ static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
block_size, vma->vm_page_prot);
@@ -2880,8 +2880,8 @@ static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
@@ -180,7 +180,7 @@ static int check_mmap_afu_irq(struct ocxl_context *ctx,
if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
!(vma->vm_flags & VM_WRITE))
return -EINVAL;
- vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
+ clear_vm_flags(vma, VM_MAYREAD | VM_MAYEXEC);
return 0;
}
@@ -204,7 +204,7 @@ int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
if (rc)
return rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxl_vmops;
return 0;
@@ -134,7 +134,7 @@ static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
(afu->config.global_mmio_size >> PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &global_mmio_vmops;
vma->vm_private_data = afu;
@@ -96,13 +96,13 @@ static int open_dice_mmap(struct file *filp, struct vm_area_struct *vma)
/* Ensure userspace cannot acquire VM_WRITE + VM_SHARED later. */
if (vma->vm_flags & VM_WRITE)
- vma->vm_flags &= ~VM_MAYSHARE;
+ clear_vm_flags(vma, VM_MAYSHARE);
else if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
/* Create write-combine mapping so all clients observe a wipe. */
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTDUMP);
return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
}
@@ -101,8 +101,8 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
- VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_LOCKED |
+ VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = PAGE_SHARED;
vma->vm_ops = &gru_vm_ops;
@@ -229,7 +229,7 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
if (!qfr)
return -ENOMEM;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
vma->vm_ops = &uacce_vm_ops;
vma->vm_private_data = q;
qfr->type = type;
@@ -389,7 +389,7 @@ static int dax_devmap(struct file *f, struct vm_area_struct *vma)
/* completion area is mapped read-only for user */
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
len, vma->vm_page_prot))
@@ -1167,7 +1167,7 @@ static int afu_mmap(struct file *file, struct vm_area_struct *vma)
(ctx->psn_size >> PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxlflash_vmops;
return 0;
@@ -1288,7 +1288,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
}
sfp->mmap_called = 1;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
out:
@@ -1072,7 +1072,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
vma->vm_private_data = bo;
vma->vm_ops = &hmm_bo_vm_ops;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
/*
* call hmm_bo_vm_open explicitly.
@@ -1476,8 +1476,8 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
}
vma->vm_ops = &meye_vm_ops;
- vma->vm_flags &= ~VM_IO; /* not I/O memory */
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ /* not I/O memory */
+ mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = (void *) (offset / gbufsize);
meye_vm_open(vma);
@@ -779,7 +779,7 @@ static int v4l_stk_mmap(struct file *fp, struct vm_area_struct *vma)
ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
if (ret)
return ret;
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_private_data = sbuf;
vma->vm_ops = &stk_v4l_vm_ops;
sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
@@ -1928,7 +1928,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &tcmu_vm_ops;
vma->vm_private_data = udev;
@@ -713,7 +713,7 @@ static const struct vm_operations_struct uio_logical_vm_ops = {
static int uio_mmap_logical(struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &uio_logical_vm_ops;
return 0;
}
@@ -279,8 +279,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
}
}
- vma->vm_flags |= VM_IO;
- vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &usbdev_vm_ops;
vma->vm_private_data = usbm;
@@ -1272,8 +1272,7 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ mod_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
return 0;
@@ -512,7 +512,7 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
{
struct vduse_iova_domain *domain = file->private_data;
- vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_private_data = domain;
vma->vm_ops = &vduse_domain_mmap_ops;
@@ -1799,7 +1799,7 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
* See remap_pfn_range(), called from vfio_pci_fault() but we can't
* change vm_flags within the fault handler. Set them now.
*/
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vfio_pci_mmap_ops;
return 0;
@@ -1315,7 +1315,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_end - vma->vm_start != notify.size)
return -ENOTSUPP;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vhost_vdpa_vm_ops;
return 0;
}
@@ -391,7 +391,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
#ifndef MMU
/* this is uClinux (no MMU) specific code */
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_start = videomemory;
return 0;
@@ -232,9 +232,9 @@ static const struct address_space_operations fb_deferred_io_aops = {
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
- vma->vm_flags |= VM_IO;
+ set_vm_flags(vma, VM_IO);
vma->vm_private_data = info;
return 0;
}
@@ -525,7 +525,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = vm_priv;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &gntalloc_vmops;
@@ -1055,10 +1055,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_ops = &gntdev_vmops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
if (use_ptemod)
- vma->vm_flags |= VM_DONTCOPY;
+ set_vm_flags(vma, VM_DONTCOPY);
vma->vm_private_data = map;
if (map->flags) {
@@ -156,7 +156,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
vma_priv->file_priv = file_priv;
vma_priv->users = 1;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_IO | VM_DONTEXPAND);
vma->vm_ops = &privcmd_buf_vm_ops;
vma->vm_private_data = vma_priv;
@@ -934,8 +934,8 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
/* DONTCOPY is essential for Xen because copy_page_range doesn't know
* how to recreate these mappings */
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
- VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
+ VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &privcmd_vm_ops;
vma->vm_private_data = NULL;
@@ -390,7 +390,7 @@ static const struct vm_operations_struct aio_ring_vm_ops = {
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_ops = &aio_ring_vm_ops;
return 0;
}
@@ -408,7 +408,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
* unpopulated ptes via cramfs_read_folio().
*/
int i;
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
@@ -429,7 +429,7 @@ static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_ops = &erofs_dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
return 0;
}
#else
@@ -270,7 +270,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
- vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+ init_vm_flags(vma, VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
err = insert_vm_struct(mm, vma);
@@ -834,7 +834,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
}
/* mprotect_fixup is overkill to remove the temporary stack flags */
- vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+ clear_vm_flags(vma, VM_STACK_INCOMPLETE_SETUP);
stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
stack_size = vma->vm_end - vma->vm_start;
@@ -801,7 +801,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
} else {
vma->vm_ops = &ext4_file_vm_ops;
}
@@ -860,7 +860,7 @@ int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &fuse_dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ set_vm_flags(vma, VM_MIXEDMAP | VM_HUGEPAGE);
return 0;
}
@@ -132,7 +132,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* way when do_mmap unwinds (may be important on powerpc
* and ia64).
*/
- vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ set_vm_flags(vma, VM_HUGETLB | VM_DONTEXPAND);
vma->vm_ops = &hugetlb_vm_ops;
ret = seal_check_future_write(info->seals, vma);
@@ -813,7 +813,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
* as input to create an allocation policy.
*/
vma_init(&pseudo_vma, mm);
- pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
+ init_vm_flags(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
pseudo_vma.vm_file = file;
for (index = start; index < end; index++) {
@@ -389,8 +389,7 @@ static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
"orangefs_file_mmap: called on %pD\n", file);
/* set the sequential readahead hint */
- vma->vm_flags |= VM_SEQ_READ;
- vma->vm_flags &= ~VM_RAND_READ;
+ mod_vm_flags(vma, VM_SEQ_READ, VM_RAND_READ);
file_accessed(file);
vma->vm_ops = &orangefs_file_vm_ops;
@@ -1302,7 +1302,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
mas_for_each(&mas, vma, ULONG_MAX) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
- vma->vm_flags &= ~VM_SOFTDIRTY;
+ clear_vm_flags(vma, VM_SOFTDIRTY);
vma_set_page_prot(vma);
}
@@ -582,8 +582,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
- vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
- vma->vm_flags |= VM_MIXEDMAP;
+ mod_vm_flags(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
vma->vm_ops = &vmcore_mmap_ops;
len = 0;
@@ -618,7 +618,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
for_each_vma(vmi, vma) {
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- vma->vm_flags &= ~__VM_UFFD_FLAGS;
+ clear_vm_flags(vma, __VM_UFFD_FLAGS);
}
}
mmap_write_unlock(mm);
@@ -652,7 +652,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
octx = vma->vm_userfaultfd_ctx.ctx;
if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- vma->vm_flags &= ~__VM_UFFD_FLAGS;
+ clear_vm_flags(vma, __VM_UFFD_FLAGS);
return 0;
}
@@ -733,7 +733,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
} else {
/* Drop uffd context if remap feature not enabled */
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- vma->vm_flags &= ~__VM_UFFD_FLAGS;
+ clear_vm_flags(vma, __VM_UFFD_FLAGS);
}
}
@@ -895,7 +895,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
prev = vma;
}
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
mmap_write_unlock(mm);
@@ -1463,7 +1463,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx.ctx = ctx;
if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
@@ -1651,7 +1651,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
skip:
@@ -1429,7 +1429,7 @@ xfs_file_mmap(
file_accessed(file);
vma->vm_ops = &xfs_file_vm_ops;
if (IS_DAX(inode))
- vma->vm_flags |= VM_HUGEPAGE;
+ set_vm_flags(vma, VM_HUGEPAGE);
return 0;
}
@@ -3756,7 +3756,7 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
* VM_MAYWRITE as we still want them to be COW-writable.
*/
if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~(VM_MAYWRITE);
+ clear_vm_flags(vma, VM_MAYWRITE);
}
return 0;
@@ -269,7 +269,7 @@ static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma
if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EPERM;
} else {
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
/* remap_vmalloc_range() checks size and offset constraints */
return remap_vmalloc_range(vma, rb_map->rb,
@@ -290,7 +290,7 @@ static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma
*/
return -EPERM;
} else {
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
}
/* remap_vmalloc_range() checks size and offset constraints */
return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
@@ -882,10 +882,10 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
/* set default open/close callbacks */
vma->vm_ops = &bpf_map_default_vmops;
vma->vm_private_data = map;
- vma->vm_flags &= ~VM_MAYEXEC;
+ clear_vm_flags(vma, VM_MAYEXEC);
if (!(vma->vm_flags & VM_WRITE))
/* disallow re-mapping with PROT_WRITE */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
err = map->ops->map_mmap(map, vma);
if (err)
@@ -6573,7 +6573,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
* Since pinned accounting is per vm we cannot allow fork() to copy our
* vma.
*/
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &perf_mmap_vmops;
if (event->pmu->event_mapped)
@@ -489,7 +489,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
goto exit;
}
spin_unlock_irqrestore(&kcov->lock, flags);
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
for (off = 0; off < size; off += PAGE_SIZE) {
page = vmalloc_to_page(kcov->area + off);
res = vm_insert_page(vma, vma->vm_start + off, page);
@@ -91,7 +91,7 @@ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_ops = &relay_file_mmap_ops;
- vma->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(vma, VM_DONTEXPAND);
vma->vm_private_data = buf;
return 0;
@@ -179,7 +179,7 @@ static int madvise_update_vma(struct vm_area_struct *vma,
/*
* vm_flags is protected by the mmap_lock held in write mode.
*/
- vma->vm_flags = new_flags;
+ reset_vm_flags(vma, new_flags);
if (!vma->vm_file || vma_is_anon_shmem(vma)) {
error = replace_anon_vma_name(vma, anon_name);
if (error)
@@ -1951,7 +1951,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
}
/* Defer page refcount checking till we're about to map that page. */
return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
@@ -2009,7 +2009,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
@@ -2475,7 +2475,7 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
vma->vm_pgoff = pfn;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
@@ -380,7 +380,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
*/
if (newflags & VM_LOCKED)
newflags |= VM_IO;
- WRITE_ONCE(vma->vm_flags, newflags);
+ reset_vm_flags(vma, newflags);
lru_add_drain();
walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
@@ -388,7 +388,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
if (newflags & VM_IO) {
newflags &= ~VM_IO;
- WRITE_ONCE(vma->vm_flags, newflags);
+ reset_vm_flags(vma, newflags);
}
}
@@ -456,7 +456,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
/* No work to do, and mlocking twice would be wrong */
- vma->vm_flags = newflags;
+ reset_vm_flags(vma, newflags);
} else {
mlock_vma_pages_range(vma, start, end, newflags);
}
@@ -2607,7 +2607,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma->vm_start = addr;
vma->vm_end = end;
- vma->vm_flags = vm_flags;
+ init_vm_flags(vma, vm_flags);
vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;
@@ -2736,7 +2736,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
* then new mapped in-place (which must be aimed as
* a completely new data area).
*/
- vma->vm_flags |= VM_SOFTDIRTY;
+ set_vm_flags(vma, VM_SOFTDIRTY);
vma_set_page_prot(vma);
@@ -2959,7 +2959,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
anon_vma_interval_tree_pre_update_vma(vma);
}
vma->vm_end = addr + len;
- vma->vm_flags |= VM_SOFTDIRTY;
+ set_vm_flags(vma, VM_SOFTDIRTY);
mas_store_prealloc(mas, vma);
if (vma->anon_vma) {
@@ -2979,7 +2979,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = addr >> PAGE_SHIFT;
- vma->vm_flags = flags;
+ init_vm_flags(vma, flags);
vma->vm_page_prot = vm_get_page_prot(flags);
mas_set_range(mas, vma->vm_start, addr + len - 1);
if (mas_store_gfp(mas, vma, GFP_KERNEL))
@@ -2992,7 +2992,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
mm->data_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
- vma->vm_flags |= VM_SOFTDIRTY;
+ set_vm_flags(vma, VM_SOFTDIRTY);
validate_mm(mm);
return 0;
@@ -633,7 +633,7 @@ mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
* vm_flags and vm_page_prot are protected by the mmap_lock
* held in write mode.
*/
- vma->vm_flags = newflags;
+ reset_vm_flags(vma, newflags);
if (vma_wants_manual_pte_write_upgrade(vma))
mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
vma_set_page_prot(vma);
@@ -661,7 +661,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
/* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
- vma->vm_flags &= ~VM_ACCOUNT;
+ clear_vm_flags(vma, VM_ACCOUNT);
excess = vma->vm_end - vma->vm_start - old_len;
if (old_addr > vma->vm_start &&
old_addr + old_len < vma->vm_end)
@@ -716,9 +716,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
/* Restore VM_ACCOUNT if one or two pieces of vma left */
if (excess) {
- vma->vm_flags |= VM_ACCOUNT;
+ set_vm_flags(vma, VM_ACCOUNT);
if (split)
- find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
+ set_vm_flags(find_vma(mm, vma->vm_end), VM_ACCOUNT);
}
return new_addr;
@@ -173,7 +173,7 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
mmap_write_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)ret);
if (vma)
- vma->vm_flags |= VM_USERMAP;
+ set_vm_flags(vma, VM_USERMAP);
mmap_write_unlock(current->mm);
}
@@ -991,7 +991,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
atomic_long_add(total, &mmap_pages_allocated);
- region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+ set_vm_flags(vma, VM_MAPPED_COPY);
+ region->vm_flags = vma->vm_flags;
region->vm_start = (unsigned long) base;
region->vm_end = region->vm_start + len;
region->vm_top = region->vm_start + (total << PAGE_SHIFT);
@@ -1088,7 +1089,7 @@ unsigned long do_mmap(struct file *file,
region->vm_flags = vm_flags;
region->vm_pgoff = pgoff;
- vma->vm_flags = vm_flags;
+ init_vm_flags(vma, vm_flags);
vma->vm_pgoff = pgoff;
if (file) {
@@ -1152,7 +1153,7 @@ unsigned long do_mmap(struct file *file,
vma->vm_end = start + len;
if (pregion->vm_flags & VM_MAPPED_COPY)
- vma->vm_flags |= VM_MAPPED_COPY;
+ set_vm_flags(vma, VM_MAPPED_COPY);
else {
ret = do_mmap_shared_file(vma);
if (ret < 0) {
@@ -1632,7 +1633,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
if (addr != (pfn << PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(remap_pfn_range);
@@ -128,7 +128,7 @@ static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
return -EAGAIN;
- vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
+ set_vm_flags(vma, VM_LOCKED | VM_DONTDUMP);
vma->vm_ops = &secretmem_vm_ops;
return 0;
@@ -2289,7 +2289,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
/* arm64 - allow memory tagging on RAM-based files */
- vma->vm_flags |= VM_MTE_ALLOWED;
+ set_vm_flags(vma, VM_MTE_ALLOWED);
file_accessed(file);
/* This is anonymous shared memory if it is unlinked at the time of mmap */
@@ -3657,7 +3657,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
size -= PAGE_SIZE;
} while (size > 0);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
@@ -1890,10 +1890,10 @@ int tcp_mmap(struct file *file, struct socket *sock,
{
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
- vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
+ clear_vm_flags(vma, VM_MAYWRITE | VM_MAYEXEC);
/* Instruct vm_insert_page() to not mmap_read_lock(mm) */
- vma->vm_flags |= VM_MIXEDMAP;
+ set_vm_flags(vma, VM_MIXEDMAP);
vma->vm_ops = &tcp_vm_ops;
return 0;
@@ -262,7 +262,7 @@ static int sel_mmap_handle_status(struct file *filp,
if (vma->vm_flags & VM_WRITE)
return -EPERM;
/* disallow mprotect() turns it into writable */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
return remap_pfn_range(vma, vma->vm_start,
page_to_pfn(status),
@@ -506,13 +506,13 @@ static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHARED) {
/* do not allow mprotect to make mapping writable */
- vma->vm_flags &= ~VM_MAYWRITE;
+ clear_vm_flags(vma, VM_MAYWRITE);
if (vma->vm_flags & VM_WRITE)
return -EACCES;
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &sel_mmap_policy_ops;
return 0;
@@ -2910,7 +2910,7 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
}
/* set VM_READ access as well to fix memset() routines that do
reads before writes (to improve performance) */
- area->vm_flags |= VM_READ;
+ set_vm_flags(area, VM_READ);
if (substream == NULL)
return -ENXIO;
runtime = substream->runtime;
@@ -3675,8 +3675,8 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file
return -EINVAL;
area->vm_ops = &snd_pcm_vm_ops_status;
area->vm_private_data = substream;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- area->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ mod_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP,
+ VM_WRITE | VM_MAYWRITE);
return 0;
}
@@ -3712,7 +3713,7 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file
return -EINVAL;
area->vm_ops = &snd_pcm_vm_ops_control;
area->vm_private_data = substream;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
@@ -3828,7 +3829,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
if (!substream->ops->page &&
!snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
return 0;
@@ -404,7 +404,7 @@ static int mmp_pcm_mmap(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
substream->dma_buffer.addr >> PAGE_SHIFT,
@@ -224,9 +224,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
}
area->vm_ops = &usb_stream_hwdep_vm_ops;
- area->vm_flags |= VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTDUMP);
if (!read)
- area->vm_flags |= VM_DONTEXPAND;
+ set_vm_flags(area, VM_DONTEXPAND);
area->vm_private_data = us122l;
atomic_inc(&us122l->mmap_count);
out:
@@ -61,7 +61,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep *hw, struct file *filp, struct vm
}
area->vm_ops = &us428ctls_vm_ops;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
area->vm_private_data = hw->private_data;
return 0;
}
@@ -706,7 +706,7 @@ static int snd_usx2y_hwdep_pcm_mmap(struct snd_hwdep *hw, struct file *filp, str
return -ENODEV;
area->vm_ops = &snd_usx2y_hwdep_pcm_vm_ops;
- area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ set_vm_flags(area, VM_DONTEXPAND | VM_DONTDUMP);
area->vm_private_data = hw->private_data;
return 0;
}
Replace direct modifications to vma->vm_flags with calls to modifier
functions to be able to track flag changes and to keep vma locking
correctness.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 arch/arm/kernel/process.c | 2 +-
 arch/ia64/mm/init.c | 8 ++++----
 arch/loongarch/include/asm/tlb.h | 2 +-
 arch/powerpc/kvm/book3s_xive_native.c | 2 +-
 arch/powerpc/mm/book3s64/subpage_prot.c | 2 +-
 arch/powerpc/platforms/book3s/vas-api.c | 2 +-
 arch/powerpc/platforms/cell/spufs/file.c | 14 +++++++-------
 arch/s390/mm/gmap.c | 3 +--
 arch/x86/entry/vsyscall/vsyscall_64.c | 2 +-
 arch/x86/kernel/cpu/sgx/driver.c | 2 +-
 arch/x86/kernel/cpu/sgx/virt.c | 2 +-
 arch/x86/mm/pat/memtype.c | 6 +++---
 arch/x86/um/mem_32.c | 2 +-
 drivers/acpi/pfr_telemetry.c | 2 +-
 drivers/android/binder.c | 3 +--
 drivers/char/mspec.c | 2 +-
 drivers/crypto/hisilicon/qm.c | 2 +-
 drivers/dax/device.c | 2 +-
 drivers/dma/idxd/cdev.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ++--
 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 4 ++--
 drivers/gpu/drm/amd/amdkfd/kfd_events.c | 4 ++--
 drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 ++--
 drivers/gpu/drm/drm_gem.c | 2 +-
 drivers/gpu/drm/drm_gem_dma_helper.c | 3 +--
 drivers/gpu/drm/drm_gem_shmem_helper.c | 2 +-
 drivers/gpu/drm/drm_vm.c | 8 ++++----
 drivers/gpu/drm/etnaviv/etnaviv_gem.c | 2 +-
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 4 ++--
 drivers/gpu/drm/gma500/framebuffer.c | 2 +-
 drivers/gpu/drm/i810/i810_dma.c | 2 +-
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 4 ++--
 drivers/gpu/drm/mediatek/mtk_drm_gem.c | 2 +-
 drivers/gpu/drm/msm/msm_gem.c | 2 +-
 drivers/gpu/drm/omapdrm/omap_gem.c | 3 +--
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 3 +--
 drivers/gpu/drm/tegra/gem.c | 5 ++---
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 3 +--
 drivers/gpu/drm/virtio/virtgpu_vram.c | 2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 2 +-
 drivers/gpu/drm/xen/xen_drm_front_gem.c | 3 +--
 drivers/hsi/clients/cmt_speech.c | 2 +-
 drivers/hwtracing/intel_th/msu.c | 2 +-
 drivers/hwtracing/stm/core.c | 2 +-
 drivers/infiniband/hw/hfi1/file_ops.c | 4 ++--
 drivers/infiniband/hw/mlx5/main.c | 4 ++--
 drivers/infiniband/hw/qib/qib_file_ops.c | 13 ++++++-------
 drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 2 +-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 2 +-
 .../media/common/videobuf2/videobuf2-dma-contig.c | 2 +-
 drivers/media/common/videobuf2/videobuf2-vmalloc.c | 2 +-
 drivers/media/v4l2-core/videobuf-dma-contig.c | 2 +-
 drivers/media/v4l2-core/videobuf-dma-sg.c | 4 ++--
 drivers/media/v4l2-core/videobuf-vmalloc.c | 2 +-
 drivers/misc/cxl/context.c | 2 +-
 drivers/misc/habanalabs/common/memory.c | 2 +-
 drivers/misc/habanalabs/gaudi/gaudi.c | 4 ++--
 drivers/misc/habanalabs/gaudi2/gaudi2.c | 8 ++++----
 drivers/misc/habanalabs/goya/goya.c | 4 ++--
 drivers/misc/ocxl/context.c | 4 ++--
 drivers/misc/ocxl/sysfs.c | 2 +-
 drivers/misc/open-dice.c | 6 +++---
 drivers/misc/sgi-gru/grufile.c | 4 ++--
 drivers/misc/uacce/uacce.c | 2 +-
 drivers/sbus/char/oradax.c | 2 +-
 drivers/scsi/cxlflash/ocxl_hw.c | 2 +-
 drivers/scsi/sg.c | 2 +-
 drivers/staging/media/atomisp/pci/hmm/hmm_bo.c | 2 +-
 drivers/staging/media/deprecated/meye/meye.c | 4 ++--
 .../media/deprecated/stkwebcam/stk-webcam.c | 2 +-
 drivers/target/target_core_user.c | 2 +-
 drivers/uio/uio.c | 2 +-
 drivers/usb/core/devio.c | 3 +--
 drivers/usb/mon/mon_bin.c | 3 +--
 drivers/vdpa/vdpa_user/iova_domain.c | 2 +-
 drivers/vfio/pci/vfio_pci_core.c | 2 +-
 drivers/vhost/vdpa.c | 2 +-
 drivers/video/fbdev/68328fb.c | 2 +-
 drivers/video/fbdev/core/fb_defio.c | 4 ++--
 drivers/xen/gntalloc.c | 2 +-
 drivers/xen/gntdev.c | 4 ++--
 drivers/xen/privcmd-buf.c | 2 +-
 drivers/xen/privcmd.c | 4 ++--
 fs/aio.c | 2 +-
 fs/cramfs/inode.c | 2 +-
 fs/erofs/data.c | 2 +-
 fs/exec.c | 4 ++--
 fs/ext4/file.c | 2 +-
 fs/fuse/dax.c | 2 +-
 fs/hugetlbfs/inode.c | 4 ++--
 fs/orangefs/file.c | 3 +--
 fs/proc/task_mmu.c | 2 +-
 fs/proc/vmcore.c | 3 +--
 fs/userfaultfd.c | 12 ++++++------
 fs/xfs/xfs_file.c | 2 +-
 include/linux/mm.h | 2 +-
 kernel/bpf/ringbuf.c | 4 ++--
 kernel/bpf/syscall.c | 4 ++--
 kernel/events/core.c | 2 +-
 kernel/kcov.c | 2 +-
 kernel/relay.c | 2 +-
 mm/madvise.c | 2 +-
 mm/memory.c | 6 +++---
 mm/mlock.c | 6 +++---
 mm/mmap.c | 10 +++++-----
 mm/mprotect.c | 2 +-
 mm/mremap.c | 6 +++---
 mm/nommu.c | 11 ++++++-----
 mm/secretmem.c | 2 +-
 mm/shmem.c | 2 +-
 mm/vmalloc.c | 2 +-
 net/ipv4/tcp.c | 4 ++--
 security/selinux/selinuxfs.c | 6 +++---
 sound/core/oss/pcm_oss.c | 2 +-
 sound/core/pcm_native.c | 8 ++++----
 sound/soc/pxa/mmp-sspa.c | 2 +-
 sound/usb/usx2y/us122l.c | 4 ++--
 sound/usb/usx2y/usX2Yhwdep.c | 2 +-
 sound/usb/usx2y/usx2yhwdeppcm.c | 2 +-
 120 files changed, 193 insertions(+), 205 deletions(-)
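
For reference, the modifier functions used throughout this patch are
introduced by an earlier patch in this series (declared in
include/linux/mm.h). The bodies below are a minimal sketch of their
expected semantics, written out here for illustration only; the
mmap_assert_write_locked() checks express the locking rule the helpers
are meant to enforce, not necessarily their exact implementation:

/* For a detached VMA not yet linked into the mm tree; no locking needed. */
static inline void init_vm_flags(struct vm_area_struct *vma,
				 unsigned long flags)
{
	vma->vm_flags = flags;
}

/* OR flags into an attached VMA; caller must hold mmap_lock for write. */
static inline void set_vm_flags(struct vm_area_struct *vma,
				unsigned long flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags |= flags;
}

/* Clear flags from an attached VMA under the same locking rule. */
static inline void clear_vm_flags(struct vm_area_struct *vma,
				  unsigned long flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags &= ~flags;
}

/* Set and clear in one step, e.g. mod_vm_flags(vma, VM_MIXEDMAP, VM_PFNMAP). */
static inline void mod_vm_flags(struct vm_area_struct *vma,
				unsigned long set, unsigned long clear)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags |= set;
	vma->vm_flags &= ~clear;
}

/* Overwrite all flags of an attached VMA, e.g. when undoing mlock. */
static inline void reset_vm_flags(struct vm_area_struct *vma,
				  unsigned long flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	vma->vm_flags = flags;
}

With these in place, any tracking or locking hook (for example, marking
the VMA write-locked for per-VMA locking) can later be added in one spot
instead of at every call site that touches vm_flags directly.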