@@ -957,12 +957,12 @@ do_entUnaUser(void __user * va, unsigned long opcode,
si_code = SEGV_ACCERR;
else {
struct mm_struct *mm = current->mm;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
if (find_vma(mm, (unsigned long)va))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
send_sig_fault(SIGSEGV, si_code, va, 0, current);
return;
@@ -117,7 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -180,14 +180,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
/* Something tried to access memory that isn't in our memory map.
Fix it, but check if it's kernel or user first. */
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (user_mode(regs))
goto do_sigsegv;
@@ -211,14 +211,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Send a sigbus, regardless of whether we were in kernel
or user mode. */
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
@@ -88,10 +88,10 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
if (unlikely(ret != -EFAULT))
goto fail;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
FAULT_FLAG_WRITE, NULL);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (likely(!ret))
goto again;
@@ -89,7 +89,7 @@ static void show_faulting_vma(unsigned long address)
/* can't use print_vma_addr() yet as it doesn't check for
* non-inclusive vma
*/
- down_read(&active_mm->mmap_sem);
+ mm_read_lock(active_mm);
vma = find_vma(active_mm, address);
/* check against the find_vma( ) behaviour which returns the next VMA
@@ -112,7 +112,7 @@ static void show_faulting_vma(unsigned long address)
} else
pr_info(" @No matching VMA found\n");
- up_read(&active_mm->mmap_sem);
+ mm_read_unlock(active_mm);
}
static void show_ecr_verbose(struct pt_regs *regs)
@@ -107,7 +107,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -159,7 +159,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
}
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Major/minor page fault accounting
@@ -431,7 +431,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
npages = 1; /* for sigpage */
npages += vdso_total_pages;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
hint = sigpage_addr(mm, npages);
addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
@@ -458,7 +458,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
arm_install_vdso(mm, addr + PAGE_SIZE);
up_fail:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
#endif
@@ -97,12 +97,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
int si_code;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
if (find_vma(current->mm, addr) == NULL)
si_code = SEGV_MAPERR;
else
si_code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs,
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
atomic = faulthandler_disabled();
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
@@ -104,11 +104,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
while (!pin_page_for_write(to, &pte, &ptl)) {
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (__put_user(0, (char __user *)to))
goto out;
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
}
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -128,7 +128,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
spin_unlock(ptl);
}
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
out:
return n;
@@ -165,17 +165,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
return 0;
}
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(addr, &pte, &ptl)) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (__put_user(0, (char __user *)addr))
goto out;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
}
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -193,7 +193,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
else
spin_unlock(ptl);
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
out:
return n;
@@ -270,11 +270,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -327,7 +327,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
@@ -384,12 +384,12 @@ void arm64_notify_segfault(unsigned long addr)
{
int code;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
if (find_vma(current->mm, addr) == NULL)
code = SEGV_MAPERR;
else
code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
force_signal_inject(SIGSEGV, code, addr);
}
@@ -357,7 +357,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
ret = aarch32_kuser_helpers_setup(mm);
@@ -374,7 +374,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
#endif /* CONFIG_COMPAT_VDSO */
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
#endif /* CONFIG_COMPAT */
@@ -418,7 +418,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct mm_struct *mm = current->mm;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
ret = __setup_additional_pages(ARM64_VDSO,
@@ -426,7 +426,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
bprm,
uses_interp);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -491,11 +491,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -504,7 +504,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
might_sleep();
#ifdef CONFIG_DEBUG_VM
if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
goto no_context;
}
#endif
@@ -536,7 +536,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Handle the "normal" (no error) case first.
@@ -50,7 +50,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long addr;
struct mm_struct *mm = current->mm;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(addr)) {
@@ -70,7 +70,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm->context.vdso = (void *)addr;
up_fail:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -166,7 +166,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
address);
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
/*
@@ -174,7 +174,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -206,7 +206,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
@@ -52,7 +52,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long vdso_base;
struct mm_struct *mm = current->mm;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
/* Try to get it loaded right near ld.so/glibc. */
@@ -76,7 +76,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm->context.vdso = (void *)vdso_base;
up_fail:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -55,7 +55,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -108,11 +108,11 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Handle copyin/out exception cases */
if (!user_mode(regs))
@@ -139,7 +139,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (user_mode(regs)) {
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
@@ -2258,13 +2258,13 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
* now we atomically find some area in the address space and
* remap the buffer in it.
*/
- down_write(&task->mm->mmap_sem);
+ mm_write_lock(task->mm);
/* find some free area in address space, must have mmap sem held */
vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
if (IS_ERR_VALUE(vma->vm_start)) {
DPRINT(("Cannot find unmapped area for size %ld\n", size));
- up_write(&task->mm->mmap_sem);
+ mm_write_unlock(task->mm);
goto error;
}
vma->vm_end = vma->vm_start + size;
@@ -2275,7 +2275,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
/* can only be applied to current task, need to have the mm semaphore held when called */
if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
DPRINT(("Can't remap buffer\n"));
- up_write(&task->mm->mmap_sem);
+ mm_write_unlock(task->mm);
goto error;
}
@@ -2286,7 +2286,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
insert_vm_struct(mm, vma);
vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
- up_write(&task->mm->mmap_sem);
+ mm_write_unlock(task->mm);
/*
* keep track of user level virtual address
@@ -102,7 +102,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (mask & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma && !prev_vma )
@@ -179,7 +179,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
check_expansion:
@@ -210,7 +210,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
goto good_area;
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
bad_area_no_up:
#endif
@@ -276,7 +276,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
return;
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
@@ -118,13 +118,13 @@ ia64_init_addr_space (void)
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
}
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -136,13 +136,13 @@ ia64_init_addr_space (void)
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP;
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
}
}
}
@@ -399,7 +399,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
* Verify that the specified address region actually belongs
* to this process.
*/
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma(current->mm, addr);
if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
goto out_unlock;
@@ -450,7 +450,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
}
}
out_unlock:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
out:
return ret;
}
@@ -472,7 +472,7 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
spinlock_t *ptl;
unsigned long mem_value;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
@@ -501,11 +501,11 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
__put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return mem_value;
bad_access:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* This is not necessarily a bad access, we can get here if
a memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page stuff, then re-iterate.
@@ -545,13 +545,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
struct mm_struct *mm = current->mm;
unsigned long mem_value;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return mem_value;
}
@@ -86,7 +86,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -177,7 +177,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return 0;
/*
@@ -185,7 +185,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
@@ -214,6 +214,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
current->thread.faddr = address;
send_sig:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return send_fault_sig(regs);
}
@@ -137,12 +137,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mm_read_trylock(mm))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
}
vma = find_vma(mm, address);
@@ -249,7 +249,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* keep track of tlb+htab misses that are good addrs but
@@ -260,7 +260,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
bad_area_nosemaphore:
pte_errors++;
@@ -279,7 +279,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
@@ -287,7 +287,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (user_mode(regs)) {
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
return;
@@ -754,13 +754,13 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 1;
case SIGSEGV:
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)fault_addr);
if (vma && (vma->vm_start <= (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
force_sig_fault(SIGSEGV, si_code, fault_addr);
return 1;
@@ -92,7 +92,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct vm_area_struct *vma;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
/* Map delay slot emulation page */
@@ -183,6 +183,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
ret = 0;
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -130,7 +130,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
#endif
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
addr = vdso_random_addr(vdso_mapping_len);
@@ -185,12 +185,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;
}
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return 0;
up_fail:
mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -127,12 +127,12 @@ void do_page_fault(unsigned long entry, unsigned long addr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mm_read_trylock(mm))) {
if (!user_mode(regs) &&
!search_exception_tables(instruction_pointer(regs)))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -257,7 +257,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
/*
@@ -265,7 +265,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
bad_area_nosemaphore:
@@ -325,14 +325,14 @@ void do_page_fault(unsigned long entry, unsigned long addr,
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
@@ -83,11 +83,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->ea))
goto bad_area_nosemaphore;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
}
vma = find_vma(mm, address);
@@ -172,7 +172,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
/*
@@ -180,7 +180,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -218,14 +218,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
@@ -112,14 +112,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
int ret;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
/* Map kuser helpers to user space address */
ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYEXEC, kuser_page);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -104,7 +104,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -193,7 +193,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
/*
@@ -202,7 +202,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
bad_area_nosemaphore:
@@ -261,14 +261,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
__asm__ __volatile__("l.nop 42");
__asm__ __volatile__("l.nop 1");
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
@@ -717,7 +717,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (user_mode(regs)) {
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma(current->mm,regs->iaoq[0]);
if (vma && (regs->iaoq[0] >= vma->vm_start)
&& (vma->vm_flags & VM_EXEC)) {
@@ -725,10 +725,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
break; /* call do_page_fault() */
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
}
/* Fall Through */
case 27:
@@ -282,7 +282,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
if (acc_type & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma || address < vma->vm_start)
goto check_expansion;
@@ -339,7 +339,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
check_expansion:
@@ -351,7 +351,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
* Something tried to access memory that isn't in our memory map..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (user_mode(regs)) {
int signo, si_code;
@@ -423,7 +423,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
@@ -171,7 +171,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* and end up putting it elsewhere.
* Add enough to the size so that the result can be aligned.
*/
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
vdso_base = get_unmapped_area(NULL, vdso_base,
(vdso_pages << PAGE_SHIFT) +
@@ -211,11 +211,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto fail_mmapsem;
}
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return 0;
fail_mmapsem:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return rc;
}
@@ -582,7 +582,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
if (npages < 1) {
/* Check if it's an I/O mapping */
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, hva);
if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
(vma->vm_flags & VM_PFNMAP)) {
@@ -592,7 +592,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
write_ok = vma->vm_flags & VM_WRITE;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!pfn)
goto out_put;
} else {
@@ -4640,14 +4640,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma(current->mm, hva);
if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
goto up_out;
psize = vma_kernel_pagesize(vma);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
/* We can handle 4k, 64k or 16M pages in the VRMA */
if (psize >= 0x1000000)
@@ -4680,7 +4680,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
return err;
up_out:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
goto out_srcu;
}
@@ -366,7 +366,7 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
*/
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags);
- downgrade_write(&kvm->mm->mmap_sem);
+ mm_downgrade_write_lock(kvm->mm);
*downgrade = true;
if (ret)
return ret;
@@ -483,7 +483,7 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_write(&kvm->mm->mmap_sem);
+ mm_write_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
@@ -506,9 +506,9 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
mutex_unlock(&kvm->arch.uvmem_lock);
out:
if (downgrade)
- up_read(&kvm->mm->mmap_sem);
+ mm_read_unlock(kvm->mm);
else
- up_write(&kvm->mm->mmap_sem);
+ mm_write_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
@@ -660,7 +660,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_read(&kvm->mm->mmap_sem);
+ mm_read_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
goto out;
@@ -673,7 +673,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
ret = H_SUCCESS;
out:
- up_read(&kvm->mm->mmap_sem);
+ mm_read_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
@@ -355,7 +355,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (tlbsel == 1) {
struct vm_area_struct *vma;
- down_read(&kvm->mm->mmap_sem);
+ mm_read_lock(kvm->mm);
vma = find_vma(kvm->mm, hva);
if (vma && hva >= vma->vm_start &&
@@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
}
- up_read(&kvm->mm->mmap_sem);
+ mm_read_unlock(kvm->mm);
}
if (likely(!pfnmap)) {
@@ -96,7 +96,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
goto unlock_exit;
}
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
sizeof(struct vm_area_struct *);
chunk = min(chunk, entries);
@@ -114,7 +114,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
pinned += ret;
break;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (pinned != entries) {
if (!ret)
ret = -EFAULT;
@@ -92,7 +92,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
size_t nw;
unsigned long next, limit;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt)
@@ -127,7 +127,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
}
err_out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -217,7 +217,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
return -EFAULT;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt) {
@@ -267,11 +267,11 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (addr + (nw << PAGE_SHIFT) > next)
nw = (next - addr) >> PAGE_SHIFT;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
if (__copy_from_user(spp, map, nw * sizeof(u32)))
return -EFAULT;
map += nw;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
/* now flush any existing HPTEs for the range */
hpte_flush_range(mm, addr, nw);
@@ -280,6 +280,6 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
spt->maxaddr = limit;
err = 0;
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return err;
}
@@ -33,7 +33,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (mm->pgd == NULL)
return -EFAULT;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
ret = -EFAULT;
vma = find_vma(mm, ea);
if (!vma)
@@ -82,7 +82,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
current->min_flt++;
out_unlock:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
@@ -108,7 +108,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return __bad_area_nosemaphore(regs, address, si_code);
}
@@ -515,12 +515,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mm_read_trylock(mm))) {
if (!is_user && !search_exception_tables(regs->nip))
return bad_area_nosemaphore(regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -544,7 +544,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
if (!must_retry)
return bad_area(regs, address);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (fault_in_pages_readable((const char __user *)regs->nip,
sizeof(unsigned int)))
return bad_area_nosemaphore(regs, address);
@@ -576,7 +576,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
int pkey = vma_pkey(vma);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return bad_key_fault_exception(regs, address, pkey);
}
#endif /* CONFIG_PPC_MEM_KEYS */
@@ -607,7 +607,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
return is_user ? 0 : SIGBUS;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (unlikely(fault & VM_FAULT_ERROR))
return mm_fault_error(regs, address, fault);
@@ -332,7 +332,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
fput(exe_file);
}
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
continue;
@@ -349,13 +349,13 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
*spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
pr_debug("got dcookie for %pD\n", vma->vm_file);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out:
return app_cookie;
fail_no_image_cookie:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Cannot find dcookie for SPU binary\n",
@@ -336,11 +336,11 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
goto refault;
if (ctx->state == SPU_STATE_SAVED) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
} else {
area = ctx->spu->problem_phys + ps_offs;
ret = vmf_insert_pfn(vmf->vma, vmf->address,
@@ -61,7 +61,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
@@ -83,7 +83,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
mm->context.vdso = NULL;
end:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, addr);
if (unlikely(!vma))
goto bad_area;
@@ -160,7 +160,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
/*
@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
* Fix it, but check if it's kernel or user first.
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
@@ -196,14 +196,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
* (which will retry the fault, or kill us if we got oom-killed).
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
@@ -208,7 +208,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* it at vdso_base which is the "natural" base for it, but we might
* fail and end up putting it elsewhere.
*/
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
@@ -239,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
rc = 0;
out_up:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return rc;
}
@@ -1170,7 +1170,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
int dat_protection, fake;
int rc;
- down_read(&sg->mm->mmap_sem);
+ mm_read_lock(sg->mm);
/*
* We don't want any guest-2 tables to change - so the parent
* tables/pointers we read stay valid - unshadowing is however
@@ -1199,6 +1199,6 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
if (!rc)
rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
ipte_unlock(vcpu);
- up_read(&sg->mm->mmap_sem);
+ mm_read_unlock(sg->mm);
return rc;
}
@@ -753,9 +753,9 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
r = -EINVAL;
else {
r = 0;
- down_write(&kvm->mm->mmap_sem);
+ mm_write_lock(kvm->mm);
kvm->mm->context.allow_gmap_hpage_1m = 1;
- up_write(&kvm->mm->mmap_sem);
+ mm_write_unlock(kvm->mm);
/*
* We might have to create fake 4k page
* tables. To avoid that the hardware works on
@@ -1805,7 +1805,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (!keys)
return -ENOMEM;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1819,7 +1819,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
break;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (!r) {
r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
@@ -1863,7 +1863,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
goto out;
i = 0;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
while (i < args->count) {
unlocked = false;
@@ -1890,7 +1890,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
i++;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
out:
kvfree(keys);
return r;
@@ -2073,14 +2073,14 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
if (!values)
return -ENOMEM;
- down_read(&kvm->mm->mmap_sem);
+ mm_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
if (peek)
ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
else
ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&kvm->mm->mmap_sem);
+ mm_read_unlock(kvm->mm);
if (kvm->arch.migration_mode)
args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
@@ -2130,7 +2130,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
goto out;
}
- down_read(&kvm->mm->mmap_sem);
+ mm_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -2145,12 +2145,12 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
set_pgste_bits(kvm->mm, hva, mask, pgstev);
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&kvm->mm->mmap_sem);
+ mm_read_unlock(kvm->mm);
if (!kvm->mm->context.uses_cmm) {
- down_write(&kvm->mm->mmap_sem);
+ mm_write_lock(kvm->mm);
kvm->mm->context.uses_cmm = 1;
- up_write(&kvm->mm->mmap_sem);
+ mm_write_unlock(kvm->mm);
}
out:
vfree(bits);
@@ -270,18 +270,18 @@ static int handle_iske(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
rc = get_guest_storage_key(current->mm, vmaddr, &key);
if (rc) {
rc = fixup_user_fault(current, current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
goto retry;
}
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -317,17 +317,17 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
rc = reset_guest_reference_bit(current->mm, vmaddr);
if (rc < 0) {
rc = fixup_user_fault(current, current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
goto retry;
}
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -385,7 +385,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
m3 & SSKE_NQ, m3 & SSKE_MR,
m3 & SSKE_MC);
@@ -395,7 +395,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -1084,7 +1084,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (rc)
return rc;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr,
key, NULL, nq, mr, mc);
if (rc < 0) {
@@ -1092,7 +1092,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc == -EAGAIN)
@@ -1213,9 +1213,9 @@ static int handle_essa(struct kvm_vcpu *vcpu)
* already correct, we do nothing and avoid the lock.
*/
if (vcpu->kvm->mm->context.uses_cmm == 0) {
- down_write(&vcpu->kvm->mm->mmap_sem);
+ mm_write_lock(vcpu->kvm->mm);
vcpu->kvm->mm->context.uses_cmm = 1;
- up_write(&vcpu->kvm->mm->mmap_sem);
+ mm_write_unlock(vcpu->kvm->mm);
}
/*
* If we are here, we are supposed to have CMMA enabled in
@@ -1232,11 +1232,11 @@ static int handle_essa(struct kvm_vcpu *vcpu)
} else {
int srcu_idx;
- down_read(&vcpu->kvm->mm->mmap_sem);
+ mm_read_lock(vcpu->kvm->mm);
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
i = __do_essa(vcpu, orc);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
- up_read(&vcpu->kvm->mm->mmap_sem);
+ mm_read_unlock(vcpu->kvm->mm);
if (i < 0)
return i;
/* Account for the possible extra cbrl entry */
@@ -1244,10 +1244,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
}
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
- down_read(&gmap->mm->mmap_sem);
+ mm_read_lock(gmap->mm);
for (i = 0; i < entries; ++i)
__gmap_zap(gmap, cbrlo[i]);
- up_read(&gmap->mm->mmap_sem);
+ mm_read_unlock(gmap->mm);
return 0;
}
@@ -434,7 +434,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags |= FAULT_FLAG_USER;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
gmap = NULL;
if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
@@ -519,7 +519,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags &= ~(FAULT_FLAG_ALLOW_RETRY |
FAULT_FLAG_RETRY_NOWAIT);
flags |= FAULT_FLAG_TRIED;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
goto retry;
}
}
@@ -537,7 +537,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
}
fault = 0;
out_up:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out:
return fault;
}
@@ -405,10 +405,10 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
return -EINVAL;
flush = 0;
- down_write(&gmap->mm->mmap_sem);
+ mm_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE)
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
- up_write(&gmap->mm->mmap_sem);
+ mm_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
return 0;
@@ -438,7 +438,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
return -EINVAL;
flush = 0;
- down_write(&gmap->mm->mmap_sem);
+ mm_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE) {
/* Remove old translation */
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
@@ -448,7 +448,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
(void *) from + off))
break;
}
- up_write(&gmap->mm->mmap_sem);
+ mm_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
if (off >= len)
@@ -495,9 +495,9 @@ unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
unsigned long rc;
- down_read(&gmap->mm->mmap_sem);
+ mm_read_lock(gmap->mm);
rc = __gmap_translate(gmap, gaddr);
- up_read(&gmap->mm->mmap_sem);
+ mm_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
@@ -640,7 +640,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
int rc;
bool unlocked;
- down_read(&gmap->mm->mmap_sem);
+ mm_read_lock(gmap->mm);
retry:
unlocked = false;
@@ -663,7 +663,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
- up_read(&gmap->mm->mmap_sem);
+ mm_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
@@ -696,7 +696,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
unsigned long gaddr, vmaddr, size;
struct vm_area_struct *vma;
- down_read(&gmap->mm->mmap_sem);
+ mm_read_lock(gmap->mm);
for (gaddr = from; gaddr < to;
gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
/* Find the vm address for the guest address */
@@ -719,7 +719,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
}
- up_read(&gmap->mm->mmap_sem);
+ mm_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);
@@ -1102,9 +1102,9 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
return -EINVAL;
if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
- down_read(&gmap->mm->mmap_sem);
+ mm_read_lock(gmap->mm);
rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
- up_read(&gmap->mm->mmap_sem);
+ mm_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
@@ -1692,11 +1692,11 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
}
spin_unlock(&parent->shadow_lock);
/* protect after insertion, so it will get properly invalidated */
- down_read(&parent->mm->mmap_sem);
+ mm_read_lock(parent->mm);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
PROT_READ, GMAP_NOTIFY_SHADOW);
- up_read(&parent->mm->mmap_sem);
+ mm_read_unlock(parent->mm);
spin_lock(&parent->shadow_lock);
new->initialized = true;
if (rc) {
@@ -2538,12 +2538,12 @@ int s390_enable_sie(void)
/* Fail if the page tables are 2K */
if (!mm_alloc_pgste(mm))
return -EINVAL;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
@@ -2596,7 +2596,7 @@ int s390_enable_skey(void)
struct vm_area_struct *vma;
int rc = 0;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
if (mm_uses_skeys(mm))
goto out_up;
@@ -2614,7 +2614,7 @@ int s390_enable_skey(void)
walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
out_up:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
@@ -2635,8 +2635,8 @@ static const struct mm_walk_ops reset_cmma_walk_ops = {
void s390_reset_cmma(struct mm_struct *mm)
{
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
@@ -18,7 +18,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
struct vm_area_struct *vma;
long ret;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
ret = -EINVAL;
vma = find_vma(current->mm, user_addr);
if (!vma)
@@ -28,7 +28,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
goto out;
ret = follow_pfn(vma, user_addr, pfn);
out:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return ret;
}
@@ -69,10 +69,10 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
if (addr + len < addr)
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma (current->mm, addr);
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return -EFAULT;
}
@@ -91,6 +91,6 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
if (op & CACHEFLUSH_I)
flush_icache_range(addr, addr+len);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return 0;
}
@@ -61,7 +61,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long addr;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
@@ -80,7 +80,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
current->mm->context.vdso = (void *)addr;
up_fail:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -261,7 +261,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
__bad_area_nosemaphore(regs, error_code, address, si_code);
}
@@ -285,7 +285,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs))
@@ -304,7 +304,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
*/
if (fatal_signal_pending(current)) {
if (!(fault & VM_FAULT_RETRY))
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (!user_mode(regs))
no_context(regs, error_code, address);
return 1;
@@ -316,11 +316,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
no_context(regs, error_code, address);
return 1;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
/*
* We ran out of memory, call the OOM killer, and return the
@@ -424,7 +424,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
}
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (unlikely(!vma)) {
@@ -493,5 +493,5 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
@@ -196,7 +196,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
if (!from_user && address >= PAGE_OFFSET)
goto bad_area;
@@ -273,7 +273,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
/*
@@ -281,7 +281,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -330,7 +330,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (from_user) {
pagefault_out_of_memory();
return;
@@ -338,7 +338,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
goto no_context;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
if (!from_user)
goto no_context;
@@ -392,7 +392,7 @@ static void force_user_fault(unsigned long address, int write)
code = SEGV_MAPERR;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -417,15 +417,15 @@ static void force_user_fault(unsigned long address, int write)
case VM_FAULT_OOM:
goto do_sigbus;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
@@ -315,7 +315,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
@@ -323,7 +323,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
}
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
}
if (fault_code & FAULT_CODE_BAD_RA)
@@ -456,7 +456,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -487,7 +487,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
*/
bad_area:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -499,7 +499,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
goto exit_exception;
@@ -512,7 +512,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
do_sigbus:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
@@ -366,7 +366,7 @@ static int map_vdso(const struct vdso_image *image,
unsigned long text_start, addr = 0;
int ret = 0;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
/*
* First, get an unmapped region: then randomize it, and make sure that
@@ -422,7 +422,7 @@ static int map_vdso(const struct vdso_image *image,
if (ret)
current->mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -54,7 +54,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
__switch_mm(&new->context.id);
down_write_nested(&new->mmap_sem, 1);
uml_setup_stubs(new);
- up_write(&new->mmap_sem);
+ mm_write_unlock(new);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -350,7 +350,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
/* We are under mmap_sem, release it such that current can terminate */
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
force_sig(SIGKILL);
do_signal(&current->thread.regs);
}
@@ -47,7 +47,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
if (is_user)
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto out;
@@ -124,7 +124,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
#endif
flush_tlb_page(vma, address);
out:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out_nosemaphore:
return err;
@@ -133,7 +133,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!is_user)
goto out_nosemaphore;
pagefault_out_of_memory();
@@ -224,12 +224,12 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm)) {
if (!user_mode(regs)
&& !search_exception_tables(regs->UCreg_pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -266,7 +266,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
@@ -150,7 +150,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
unsigned long text_start;
int ret = 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
addr = get_unmapped_area(NULL, addr,
@@ -193,7 +193,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
up_fail:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -255,7 +255,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
/*
* Check if we have already mapped vdso blob - fail to prevent
* abusing from userspace install_speciall_mapping, which may
@@ -266,11 +266,11 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
vma_is_special_mapping(vma, &vvar_mapping)) {
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return -EEXIST;
}
}
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return map_vdso(image, addr);
}
@@ -172,7 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
pte_t *pte;
int i;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -198,7 +198,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}
@@ -16,9 +16,9 @@ DEFINE_SHOW_ATTRIBUTE(ptdump);
static int ptdump_curknl_show(struct seq_file *m, void *v)
{
if (current->mm->pgd) {
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
}
return 0;
}
@@ -29,9 +29,9 @@ DEFINE_SHOW_ATTRIBUTE(ptdump_curknl);
static int ptdump_curusr_show(struct seq_file *m, void *v)
{
if (current->mm->pgd) {
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
}
return 0;
}
@@ -928,7 +928,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}
@@ -1379,7 +1379,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* 1. Failed to acquire mmap_sem, and
* 2. The access did not originate in userspace.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mm_read_trylock(mm))) {
if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
/*
* Fault from code in kernel from
@@ -1389,7 +1389,7 @@ void do_user_addr_fault(struct pt_regs *regs,
return;
}
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -1464,7 +1464,7 @@ void do_user_addr_fault(struct pt_regs *regs,
return;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, hw_error_code, address, fault);
return;
@@ -52,10 +52,10 @@ static unsigned long mpx_mmap(unsigned long len)
if (len != mpx_bt_size_bytes(mm))
return -EINVAL;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
if (populate)
mm_populate(addr, populate);
@@ -227,7 +227,7 @@ int mpx_enable_management(void)
* unmap path; we can just use mm->context.bd_addr instead.
*/
bd_base = mpx_get_bounds_dir();
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
/* MPX doesn't support addresses above 47 bits yet. */
if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
@@ -241,7 +241,7 @@ int mpx_enable_management(void)
if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
ret = -ENXIO;
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -252,9 +252,9 @@ int mpx_disable_management(void)
if (!cpu_feature_enabled(X86_FEATURE_MPX))
return -ENXIO;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return 0;
}
@@ -58,7 +58,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!vdso_enabled)
return 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
@@ -66,7 +66,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdsop);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return err;
}
@@ -74,7 +74,7 @@ void do_page_fault(struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -140,7 +140,7 @@ void do_page_fault(struct pt_regs *regs)
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (flags & VM_FAULT_MAJOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
@@ -153,7 +153,7 @@ void do_page_fault(struct pt_regs *regs)
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (user_mode(regs)) {
current->thread.bad_vaddr = address;
current->thread.error_code = is_write;
@@ -168,7 +168,7 @@ void do_page_fault(struct pt_regs *regs)
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs)
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* Send a sigbus, regardless of whether we were in kernel
* or user mode.
@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = alloc->vma;
}
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
}
return 0;
@@ -303,7 +303,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
err_no_vma:
if (mm) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
@@ -1343,9 +1343,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* concurrently and the queues are actually stopped
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
is_invalid_userptr = atomic_read(&mem->invalid);
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
}
mutex_lock(&mem->lock);
@@ -838,7 +838,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
goto out_free_ranges;
}
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
@@ -849,15 +849,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
r = -EPERM;
goto out_unlock;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
r = hmm_range_fault(range, 0);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (unlikely(r <= 0)) {
/*
* FIXME: This timeout should encompass the retry from
@@ -886,7 +886,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
return 0;
out_unlock:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out_free_pfns:
kvfree(range->pfns);
out_free_ranges:
@@ -902,7 +902,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, address);
memory_exception_data.gpu_id = dev->id;
@@ -925,7 +925,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NoExecute = 0;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
pr_debug("notpresent %d, noexecute %d, readonly %d\n",
@@ -89,7 +89,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mm_write_lock_killable(mm)) {
addr = -EINTR;
goto err;
}
@@ -99,7 +99,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
if (IS_ERR_VALUE(addr))
goto err;
}
@@ -201,7 +201,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (IS_ERR(mn))
err = PTR_ERR(mn);
- down_write(&mm->mm->mmap_sem);
+ mm_write_lock(mm->mm);
mutex_lock(&mm->i915->mm_lock);
if (mm->mn == NULL && !err) {
/* Protected by mmap_sem (write-lock) */
@@ -218,7 +218,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
- up_write(&mm->mm->mmap_sem);
+ mm_write_unlock(mm->mm);
if (mn && !IS_ERR(mn))
kfree(mn);
@@ -466,7 +466,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
ret = -EFAULT;
if (mmget_not_zero(mm)) {
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
while (pinned < npages) {
ret = get_user_pages_remote
(work->task, mm,
@@ -479,7 +479,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pinned += ret;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
}
}
@@ -169,7 +169,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
mm = get_task_mm(current);
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (addr = args->va_start, end = args->va_start + size; addr < end;) {
struct vm_area_struct *vma;
@@ -192,7 +192,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
args->result = 0;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
return 0;
@@ -342,7 +342,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
if (ret)
goto out_free;
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
svmm->notifier.ops = &nouveau_mn_ops;
ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
@@ -351,12 +351,12 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
cli->svm.svmm = svmm;
cli->svm.cli = cli;
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
mutex_unlock(&cli->mutex);
return 0;
out_mm_unlock:
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
out_free:
mutex_unlock(&cli->mutex);
kfree(svmm);
@@ -540,9 +540,9 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
range.notifier_seq = mmu_interval_read_begin(range.notifier);
range.default_flags = 0;
range.pfn_flags_mask = -1UL;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
ret = hmm_range_fault(&range, 0);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (ret <= 0) {
if (ret == 0 || ret == -EBUSY)
continue;
@@ -671,18 +671,18 @@ nouveau_svm_fault(struct nvif_notify *notify)
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
@@ -196,12 +196,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return r;
}
@@ -341,17 +341,17 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
r = radeon_bo_reserve(bo, true);
if (r) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
goto release_object;
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (r)
goto release_object;
}
@@ -67,7 +67,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mm_read_unlock(vmf->vma->vm_mm);
(void) dma_fence_wait(bo->moving, true);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
@@ -138,7 +138,7 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mm_read_unlock(vmf->vma->vm_mm);
(void) ttm_bo_wait_unreserved(bo);
ttm_bo_put(bo);
}
@@ -266,14 +266,14 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg = umem->sg_head.sgl;
while (npages) {
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags | FOLL_LONGTERM,
page_list, NULL);
if (ret < 0) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
goto umem_release;
}
@@ -284,7 +284,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
dma_get_max_seg_size(context->device->dma_device),
&umem->sg_nents);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
sg_mark_end(sg);
@@ -246,16 +246,16 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
struct vm_area_struct *vma;
struct hstate *h;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, ib_umem_start(umem_odp));
if (!vma || !is_vm_hugetlb_page(vma)) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
ret = -EINVAL;
goto err_free;
}
h = hstate_vma(vma);
umem_odp->page_shift = huge_page_shift(h);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
@@ -443,7 +443,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
(bcnt + BIT(page_shift) - 1) >> page_shift,
PAGE_SIZE / sizeof(struct page *));
- down_read(&owning_mm->mmap_sem);
+ mm_read_lock(owning_mm);
/*
* Note: this might result in redundent page getting. We can
* avoid this by checking dma_list to be 0 before calling
@@ -454,7 +454,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
flags, local_page_list, NULL, NULL);
- up_read(&owning_mm->mmap_sem);
+ mm_read_unlock(owning_mm);
if (npages < 0) {
if (npages != -EAGAIN)
@@ -939,7 +939,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
* at a time to get the lock ordering right. Typically there
* will only be one mm, so no big deal.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
if (!mmget_still_valid(mm))
goto skip_mm;
mutex_lock(&ufile->umap_lock);
@@ -961,7 +961,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
}
}
@@ -380,7 +380,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
unsigned long untagged_start = untagged_addr(start);
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
/*
* FIXME: Ideally this would iterate over all the vmas that
* cover the memory, but for now it requires a single vma to
@@ -395,7 +395,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
access_flags |= IB_ACCESS_LOCAL_WRITE;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
}
return ib_umem_get(udata, start, length, access_flags);
@@ -106,18 +106,18 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
goto bail;
}
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(start_page + got * PAGE_SIZE,
num_pages - got,
FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
goto bail_release;
}
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return 0;
bail_release:
@@ -123,7 +123,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
uiomr->owning_mm = mm = current->mm;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
locked = atomic64_add_return(npages, &current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -187,7 +187,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
} else
mmgrab(uiomr->owning_mm);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
free_page((unsigned long) page_list);
return ret;
}
@@ -397,7 +397,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
if (!writable)
foll_flags |= FOLL_FORCE;
- down_read(&mm_s->mmap_sem);
+ mm_read_lock(mm_s);
mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -441,7 +441,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
num_pages -= got;
}
out_sem_up:
- up_read(&mm_s->mmap_sem);
+ mm_read_unlock(mm_s);
if (rv > 0)
return umem;
@@ -487,7 +487,7 @@ static void do_fault(struct work_struct *work)
flags |= FAULT_FLAG_WRITE;
flags |= FAULT_FLAG_REMOTE;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
/* failed to get a vma in the right range */
@@ -499,7 +499,7 @@ static void do_fault(struct work_struct *work)
ret = handle_mm_fault(vma, address, flags);
out:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (ret & VM_FAULT_ERROR)
/* failed to service fault */
@@ -591,7 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!is_canonical_address(address))
goto bad_req;
- down_read(&svm->mm->mmap_sem);
+ mm_read_lock(svm->mm);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
goto invalid;
@@ -606,7 +606,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
result = QI_RESP_SUCCESS;
invalid:
- up_read(&svm->mm->mmap_sem);
+ mm_read_unlock(svm->mm);
mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
@@ -534,7 +534,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
if (b->memory == V4L2_MEMORY_MMAP)
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
videobuf_queue_lock(q);
retval = -EBUSY;
@@ -621,7 +621,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
videobuf_queue_unlock(q);
if (b->memory == V4L2_MEMORY_MMAP)
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return retval;
}
@@ -169,7 +169,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
mem->size = PAGE_ALIGN(vb->size + offset);
ret = -EINVAL;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, untagged_baddr);
if (!vma)
@@ -201,7 +201,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
}
out_up:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return ret;
}
@@ -200,9 +200,9 @@ static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
{
int ret;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
ret = videobuf_dma_init_user_locked(dma, direction, data, size);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return ret;
}
@@ -207,7 +207,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
struct vm_area_struct *vma = NULL;
int rc = 0;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, addr);
if (!vma) {
@@ -218,7 +218,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
*vma_start = vma->vm_start;
*vma_end = vma->vm_end;
out:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return rc;
}
@@ -321,7 +321,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
return;
}
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) {
@@ -336,7 +336,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
last_esid = slb.esid;
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
}
@@ -69,14 +69,14 @@ static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
struct vm_area_struct *vma;
struct gru_thread_state *gts = NULL;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = gru_find_vma(vaddr);
if (vma)
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
if (gts)
mutex_lock(&gts->ts_ctxlock);
else
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return gts;
}
@@ -86,7 +86,7 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
struct vm_area_struct *vma;
struct gru_thread_state *gts = ERR_PTR(-EINVAL);
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
vma = gru_find_vma(vaddr);
if (!vma)
goto err;
@@ -95,11 +95,11 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
if (IS_ERR(gts))
goto err;
mutex_lock(&gts->ts_ctxlock);
- downgrade_write(&mm->mmap_sem);
+ mm_downgrade_write_lock(mm);
return gts;
err:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return gts;
}
@@ -109,7 +109,7 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
static void gru_unlock_gts(struct gru_thread_state *gts)
{
mutex_unlock(&gts->ts_ctxlock);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
}
/*
@@ -575,9 +575,9 @@ static irqreturn_t gru_intr(int chiplet, int blade)
*/
gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
- down_read_trylock(&gts->ts_mm->mmap_sem)) {
+ mm_read_trylock(gts->ts_mm)) {
gru_try_dropin(gru, gts, tfh, NULL);
- up_read(&gts->ts_mm->mmap_sem);
+ mm_read_unlock(gts->ts_mm);
} else {
tfh_user_polling_mode(tfh);
STAT(intr_mm_lock_failed);
@@ -135,7 +135,7 @@ static int gru_create_new_context(unsigned long arg)
if (!(req.options & GRU_OPT_MISS_MASK))
req.options |= GRU_OPT_MISS_FMM_INTR;
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
vma = gru_find_vma(req.gseg);
if (vma) {
vdata = vma->vm_private_data;
@@ -146,7 +146,7 @@ static int gru_create_new_context(unsigned long arg)
vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
return ret;
}
@@ -91,11 +91,11 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
struct mm_struct *mm = current->mm;
struct vm_area_struct *mpnt;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
mpnt = find_vma(mm, addr);
if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
@@ -103,7 +103,7 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
return 0;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return 0;
}
@@ -256,7 +256,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
if (addr < vma->vm_start || addr >= vma->vm_end)
@@ -276,7 +276,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
if (!vma)
cookie = INVALID_COOKIE;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return cookie;
}
@@ -76,9 +76,9 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
}
// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
- down_read(&current->mm->mmap_sem); /* get memory map semaphore */
+ mm_read_lock(current->mm); /* get memory map semaphore */
rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
- up_read(&current->mm->mmap_sem); /* release the semaphore */
+ mm_read_unlock(current->mm); /* release the semaphore */
if (rv != acd->page_count) {
dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
goto err_get_user_pages;
@@ -561,10 +561,10 @@ static int check_mem_type(unsigned long start, size_t num_pages)
if (virt_addr_valid(start))
return 0;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
rc = __check_mem_type(find_vma(mm, start),
start + num_pages * PAGE_SIZE);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return rc;
}
@@ -277,11 +277,11 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
if (!mm)
return -ESRCH; /* process exited */
- ret = down_write_killable(&mm->mmap_sem);
+ ret = mm_write_lock_killable(mm);
if (!ret) {
ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
dma->lock_cap);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
}
if (async)
@@ -329,7 +329,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
if (prot & IOMMU_WRITE)
flags |= FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
if (mm == current->mm) {
ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
vmas);
@@ -348,14 +348,14 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
put_page(page[0]);
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (ret == 1) {
*pfn = page_to_pfn(page[0]);
return 0;
}
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vaddr = untagged_addr(vaddr);
@@ -367,7 +367,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
ret = 0;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return ret;
}
@@ -625,7 +625,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma(current->mm, op.vaddr);
if (!vma || vma->vm_ops != &gntdev_vmops)
goto out_unlock;
@@ -639,7 +639,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
rv = 0;
out_unlock:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
@@ -278,7 +278,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
if (rc || list_empty(&pagelist))
goto out;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
{
struct page *page = list_first_entry(&pagelist,
@@ -303,7 +303,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
out_up:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
out:
free_page_list(&pagelist);
@@ -499,7 +499,7 @@ static long privcmd_ioctl_mmap_batch(
}
}
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
vma = find_vma(mm, m.addr);
if (!vma ||
@@ -555,7 +555,7 @@ static long privcmd_ioctl_mmap_batch(
BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_batch_fn, &state));
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
if (state.global_error) {
/* Write back errors in second pass. */
@@ -576,7 +576,7 @@ static long privcmd_ioctl_mmap_batch(
return ret;
out_unlock:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
goto out;
}
@@ -741,7 +741,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
vma = find_vma(mm, kdata.addr);
if (!vma || vma->vm_ops != &privcmd_vm_ops) {
@@ -820,7 +820,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
}
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
kfree(pfns);
return rc;
@@ -519,7 +519,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_size = nr_pages * PAGE_SIZE;
pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mm_write_lock_killable(mm)) {
ctx->mmap_size = 0;
aio_free_ring(ctx);
return -EINTR;
@@ -528,7 +528,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0, &unused, NULL);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
if (IS_ERR((void *)ctx->mmap_base)) {
ctx->mmap_size = 0;
aio_free_ring(ctx);
@@ -443,12 +443,12 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
core_state->dumper.task = tsk;
core_state->dumper.next = NULL;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
if (!mm->core_state)
core_waiters = zap_threads(tsk, mm, core_state, exit_code);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
if (core_waiters > 0) {
struct core_thread *ptr;
@@ -250,7 +250,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
return -ENOMEM;
vma_set_anonymous(vma);
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mm_write_lock_killable(mm)) {
err = -EINTR;
goto err_free;
}
@@ -273,11 +273,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
mm->stack_vm = mm->total_vm = 1;
arch_bprm_mm_init(mm, vma);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
err_free:
bprm->vma = NULL;
vm_area_free(vma);
@@ -738,7 +738,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
bprm->loader -= stack_shift;
bprm->exec -= stack_shift;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
vm_flags = VM_STACK_FLAGS;
@@ -795,7 +795,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
ret = -EFAULT;
out_unlock:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
@@ -1024,9 +1024,9 @@ static int exec_mmap(struct mm_struct *mm)
* through with the exec. We must hold mmap_sem around
* checking core_state and changing tsk->mm.
*/
- down_read(&old_mm->mmap_sem);
+ mm_read_lock(old_mm);
if (unlikely(old_mm->core_state)) {
- up_read(&old_mm->mmap_sem);
+ mm_read_unlock(old_mm);
return -EINTR;
}
}
@@ -1040,7 +1040,7 @@ static int exec_mmap(struct mm_struct *mm)
vmacache_flush(tsk);
task_unlock(tsk);
if (old_mm) {
- up_read(&old_mm->mmap_sem);
+ mm_read_unlock(old_mm);
BUG_ON(active_mm != old_mm);
setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
mm_update_next_owner(old_mm);
@@ -4822,7 +4822,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
}
ret = 0;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
pret = get_user_pages(ubuf, nr_pages,
FOLL_WRITE | FOLL_LONGTERM,
pages, vmas);
@@ -4840,7 +4840,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
} else {
ret = pret < 0 ? pret : -EFAULT;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (ret) {
/*
* if we did partial map, or found file backed vmas,
@@ -1979,11 +1979,11 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out;
if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
- status = down_read_killable(&mm->mmap_sem);
+ status = mm_read_lock_killable(mm);
if (!status) {
exact_vma_exists = !!find_exact_vma(mm, vm_start,
vm_end);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
}
@@ -2030,7 +2030,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
if (rc)
goto out_mmput;
- rc = down_read_killable(&mm->mmap_sem);
+ rc = mm_read_lock_killable(mm);
if (rc)
goto out_mmput;
@@ -2041,7 +2041,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
path_get(path);
rc = 0;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out_mmput:
mmput(mm);
@@ -2131,7 +2131,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
goto out_put_task;
result = ERR_PTR(-EINTR);
- if (down_read_killable(&mm->mmap_sem))
+ if (mm_read_lock_killable(mm))
goto out_put_mm;
result = ERR_PTR(-ENOENT);
@@ -2144,7 +2144,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
(void *)(unsigned long)vma->vm_file->f_mode);
out_no_vma:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out_put_mm:
mmput(mm);
out_put_task:
@@ -128,7 +128,7 @@ static void vma_stop(struct proc_maps_private *priv)
struct mm_struct *mm = priv->mm;
release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
}
@@ -166,7 +166,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mm_read_lock_killable(mm)) {
mmput(mm);
return ERR_PTR(-EINTR);
}
@@ -873,7 +873,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
memset(&mss, 0, sizeof(mss));
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mm_read_lock_killable(mm);
if (ret)
goto out_put_mm;
@@ -892,7 +892,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
__show_smap(m, &mss, true);
release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out_put_mm:
mmput(mm);
@@ -1166,7 +1166,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
};
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mm_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1176,11 +1176,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
* resident set size to this mm's current rss value.
*/
reset_mm_hiwater_rss(mm);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
goto out_mm;
}
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mm_read_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1189,8 +1189,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
- up_read(&mm->mmap_sem);
- if (down_write_killable(&mm->mmap_sem)) {
+ mm_read_unlock(mm);
+ if (mm_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1209,14 +1209,14 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
* failed like if
* get_proc_task() fails?
*/
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
goto out_mm;
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma);
}
- downgrade_write(&mm->mmap_sem);
+ mm_downgrade_write_lock(mm);
break;
}
@@ -1229,7 +1229,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, 0, -1);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out_mm:
mmput(mm);
}
@@ -1590,11 +1590,11 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mm_read_lock_killable(mm);
if (ret)
goto out_free;
ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
start_vaddr = end;
len = min(count, PM_ENTRY_BYTES * pm.pos);
@@ -25,7 +25,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
struct rb_node *p;
unsigned long bytes = 0, sbytes = 0, slack = 0, size;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
@@ -77,7 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"Shared:\t%8lu bytes\n",
bytes, slack, sbytes);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
unsigned long task_vsize(struct mm_struct *mm)
@@ -86,12 +86,12 @@ unsigned long task_vsize(struct mm_struct *mm)
struct rb_node *p;
unsigned long vsize = 0;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
vsize += vma->vm_end - vma->vm_start;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return vsize;
}
@@ -104,7 +104,7 @@ unsigned long task_statm(struct mm_struct *mm,
struct rb_node *p;
unsigned long size = kobjsize(mm);
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
size += kobjsize(vma);
@@ -119,7 +119,7 @@ unsigned long task_statm(struct mm_struct *mm,
>> PAGE_SHIFT;
*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
>> PAGE_SHIFT;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
size >>= PAGE_SHIFT;
size += *text + *data;
*resident = size;
@@ -211,7 +211,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mm_read_lock_killable(mm)) {
mmput(mm);
return ERR_PTR(-EINTR);
}
@@ -221,7 +221,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (n-- == 0)
return p;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
return NULL;
}
@@ -231,7 +231,7 @@ static void m_stop(struct seq_file *m, void *_vml)
struct proc_maps_private *priv = m->private;
if (!IS_ERR_OR_NULL(_vml)) {
- up_read(&priv->mm->mmap_sem);
+ mm_read_unlock(priv->mm);
mmput(priv->mm);
}
if (priv->task) {
@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
pte_t *ptep, pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ VM_BUG_ON(!mm_is_locked(mm));
ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
pte_t *pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ VM_BUG_ON(!mm_is_locked(mm));
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
@@ -376,7 +376,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
* Coredumping runs without mmap_sem so we can only check that
* the mmap_sem is held, if PF_DUMPCORE was not set.
*/
- WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+ WARN_ON_ONCE(!mm_is_locked(mm));
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx)
@@ -489,7 +489,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
vmf->address,
vmf->flags, reason);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (likely(must_wait && !READ_ONCE(ctx->released) &&
(return_to_userland ? !signal_pending(current) :
@@ -543,7 +543,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
* and there's no need to retake the mmap_sem
* in such case.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
ret = VM_FAULT_NOPAGE;
}
}
@@ -638,7 +638,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct mm_struct *mm = release_new_ctx->mm;
/* the various vma->vm_userfaultfd_ctx still points to it */
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
/* no task can run (and in turn coredump) yet */
VM_WARN_ON(!mmget_still_valid(mm));
for (vma = mm->mmap; vma; vma = vma->vm_next)
@@ -646,7 +646,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
}
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
userfaultfd_ctx_put(release_new_ctx);
}
@@ -800,7 +800,7 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
userfaultfd_ctx_get(ctx);
WRITE_ONCE(ctx->mmap_changing, true);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
msg_init(&ewq.msg);
@@ -895,7 +895,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* it's critical that released is set to true (above), before
* taking the mmap_sem for writing.
*/
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -921,7 +921,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
mmput(mm);
wakeup:
/*
@@ -1350,7 +1350,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
if (!mmget_not_zero(mm))
goto out;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
@@ -1495,7 +1495,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
mmput(mm);
if (!ret) {
/*
@@ -1540,7 +1540,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (!mmget_not_zero(mm))
goto out;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
@@ -1657,7 +1657,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
mmput(mm);
out:
return ret;
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
+#include <linux/mm_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
@@ -275,9 +276,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
struct mmu_notifier *ret;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
ret = mmu_notifier_get_locked(ops, mm);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
void mmu_notifier_put(struct mmu_notifier *mn);
@@ -1544,7 +1544,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
if (err)
goto out_fput;
- if (down_write_killable(&current->mm->mmap_sem)) {
+ if (mm_write_lock_killable(current->mm)) {
err = -EINTR;
goto out_fput;
}
@@ -1564,7 +1564,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
if (IS_ERR_VALUE(addr))
err = (long)addr;
invalid:
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
if (populate)
mm_populate(addr, populate);
@@ -1638,7 +1638,7 @@ long ksys_shmdt(char __user *shmaddr)
if (addr & ~PAGE_MASK)
return retval;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
/*
@@ -1726,7 +1726,7 @@ long ksys_shmdt(char __user *shmaddr)
#endif
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return retval;
}
@@ -539,13 +539,13 @@ void acct_collect(long exitcode, int group_dead)
if (group_dead && current->mm) {
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = current->mm->mmap;
while (vma) {
vsize += vma->vm_end - vma->vm_start;
vma = vma->vm_next;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
}
spin_lock_irq(&current->sighand->siglock);
@@ -305,7 +305,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
* with build_id.
*/
if (!user || !current || !current->mm || irq_work_busy ||
- down_read_trylock(&current->mm->mmap_sem) == 0) {
+ mm_read_trylock(current->mm) == 0) {
/* cannot access current->mm, fall back to ips */
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
@@ -330,7 +330,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
if (!work) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
} else {
work->sem = &current->mm->mmap_sem;
irq_work_queue(&work->irq_work);
@@ -9467,7 +9467,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
if (!mm)
goto restart;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
}
raw_spin_lock_irqsave(&ifh->lock, flags);
@@ -9493,7 +9493,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (ifh->nr_file_filters) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmput(mm);
}
@@ -1064,7 +1064,7 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
if (err && is_register)
goto free;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
vma = find_vma(mm, info->vaddr);
if (!vma || !valid_vma(vma, is_register) ||
file_inode(vma->vm_file) != uprobe->inode)
@@ -1086,7 +1086,7 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
}
unlock:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
free:
mmput(mm);
info = free_map_info(info);
@@ -1241,7 +1241,7 @@ static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
struct vm_area_struct *vma;
int err = 0;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long vaddr;
loff_t offset;
@@ -1258,7 +1258,7 @@ static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
vaddr = offset_to_vaddr(vma, uprobe->offset);
err |= remove_breakpoint(uprobe, mm, vaddr);
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return err;
}
@@ -1445,7 +1445,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
struct vm_area_struct *vma;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
if (mm->uprobes_state.xol_area) {
@@ -1475,7 +1475,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
/* pairs with get_xol_area() */
smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -2045,7 +2045,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
struct uprobe *uprobe = NULL;
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, bp_vaddr);
if (vma && vma->vm_start <= bp_vaddr) {
if (valid_vma(vma, false)) {
@@ -2063,7 +2063,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
mmf_recalc_uprobes(mm);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return uprobe;
}
@@ -448,12 +448,12 @@ static void exit_mm(void)
* will increment ->nr_threads for each thread in the
* group with ->mm != NULL.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
core_state = mm->core_state;
if (core_state) {
struct core_thread self;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
self.task = current;
self.next = xchg(&core_state->dumper.next, &self);
@@ -471,14 +471,14 @@ static void exit_mm(void)
freezable_schedule();
}
__set_current_state(TASK_RUNNING);
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
}
mmgrab(mm);
BUG_ON(mm != current->active_mm);
/* more a memory barrier than a real lock */
task_lock(current);
current->mm = NULL;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
enter_lazy_tlb(mm, current);
task_unlock(current);
mm_update_next_owner(mm);
@@ -488,7 +488,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
LIST_HEAD(uf);
uprobe_start_dup_mmap();
- if (down_write_killable(&oldmm->mmap_sem)) {
+ if (mm_write_lock_killable(oldmm)) {
retval = -EINTR;
goto fail_uprobe_end;
}
@@ -612,9 +612,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
/* a new mm has just been created */
retval = arch_dup_mmap(oldmm, mm);
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
flush_tlb_mm(oldmm);
- up_write(&oldmm->mmap_sem);
+ mm_write_unlock(oldmm);
dup_userfaultfd_complete(&uf);
fail_uprobe_end:
uprobe_end_dup_mmap();
@@ -644,9 +644,9 @@ static inline void mm_free_pgd(struct mm_struct *mm)
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
- down_write(&oldmm->mmap_sem);
+ mm_write_lock(oldmm);
RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
- up_write(&oldmm->mmap_sem);
+ mm_write_unlock(oldmm);
return 0;
}
#define mm_alloc_pgd(mm) (0)
@@ -1011,7 +1011,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
- init_rwsem(&mm->mmap_sem);
+ mm_init_lock(mm);
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
mm_pgtables_bytes_init(mm);
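
For reference, the wrappers that the hunks above and below call into. The names (mm_init_lock, mm_read_lock, mm_write_lock_killable, mm_downgrade_write_lock, mm_is_locked, and so on) are taken from the diff itself; the bodies below are a minimal sketch assuming they are thin inlines over the existing mmap_sem rwsem, not the verbatim contents of the <linux/mm_lock.h> header this series adds:

/*
 * Sketch of <linux/mm_lock.h> as assumed by the conversions in this
 * patch: each wrapper forwards to the corresponding mmap_sem rwsem
 * primitive.  Illustrative only -- the real header may differ.
 */
#ifndef _LINUX_MM_LOCK_H
#define _LINUX_MM_LOCK_H

#include <linux/mm_types.h>
#include <linux/rwsem.h>

static inline void mm_init_lock(struct mm_struct *mm)
{
        init_rwsem(&mm->mmap_sem);
}

static inline void mm_write_lock(struct mm_struct *mm)
{
        down_write(&mm->mmap_sem);
}

static inline int mm_write_lock_killable(struct mm_struct *mm)
{
        return down_write_killable(&mm->mmap_sem);
}

static inline bool mm_write_trylock(struct mm_struct *mm)
{
        return down_write_trylock(&mm->mmap_sem) != 0;
}

static inline void mm_write_unlock(struct mm_struct *mm)
{
        up_write(&mm->mmap_sem);
}

static inline void mm_downgrade_write_lock(struct mm_struct *mm)
{
        downgrade_write(&mm->mmap_sem);
}

static inline void mm_read_lock(struct mm_struct *mm)
{
        down_read(&mm->mmap_sem);
}

static inline int mm_read_lock_killable(struct mm_struct *mm)
{
        return down_read_killable(&mm->mmap_sem);
}

static inline bool mm_read_trylock(struct mm_struct *mm)
{
        return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mm_read_unlock(struct mm_struct *mm)
{
        up_read(&mm->mmap_sem);
}

static inline bool mm_is_locked(struct mm_struct *mm)
{
        return rwsem_is_locked(&mm->mmap_sem) != 0;
}

#endif /* _LINUX_MM_LOCK_H */

Keeping the wrappers one-to-one with the rwsem primitives is what makes the conversion mechanical: each down_*/up_* call maps to exactly one mm_* call, and the lock's implementation can later be changed behind this interface without touching callers again.
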
@@ -753,10 +753,10 @@ static int fault_in_user_writeable(u32 __user *uaddr)
struct mm_struct *mm = current->mm;
int ret;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE, NULL);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return ret < 0 ? ret : 0;
}
@@ -2545,7 +2545,7 @@ static void task_numa_work(struct callback_head *work)
return;
- if (!down_read_trylock(&mm->mmap_sem))
+ if (!mm_read_trylock(mm))
return;
vma = find_vma(mm, start);
if (!vma) {
@@ -2613,7 +2613,7 @@ static void task_numa_work(struct callback_head *work)
mm->numa_scan_offset = start;
else
reset_ptenuma_scan(p);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Make sure tasks use at least 32x as much time to run other code
@@ -1845,7 +1845,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
if (exe_file) {
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!vma->vm_file)
continue;
@@ -1854,7 +1854,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
goto exit_err;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
fput(exe_file);
}
@@ -1868,7 +1868,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
fdput(exe);
return err;
exit_err:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
fput(exe_file);
goto exit;
}
@@ -2009,7 +2009,7 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
* arg_lock protects concurent updates but we still need mmap_sem for
* read to exclude races with sys_brk.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
/*
* We don't validate if these members are pointing to
@@ -2048,7 +2048,7 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
if (prctl_map.auxv_size)
memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
@@ -2124,7 +2124,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
* mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
* validation.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, addr);
spin_lock(&mm->arg_lock);
@@ -2216,7 +2216,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
error = 0;
out:
spin_unlock(&mm->arg_lock);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return error;
}
@@ -2439,13 +2439,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SET_THP_DISABLE:
if (arg3 || arg4 || arg5)
return -EINVAL;
- if (down_write_killable(&me->mm->mmap_sem))
+ if (mm_write_lock_killable(me->mm))
return -EINTR;
if (arg2)
set_bit(MMF_DISABLE_THP, &me->mm->flags);
else
clear_bit(MMF_DISABLE_THP, &me->mm->flags);
- up_write(&me->mm->mmap_sem);
+ mm_write_unlock(me->mm);
break;
case PR_MPX_ENABLE_MANAGEMENT:
case PR_MPX_DISABLE_MANAGEMENT:
@@ -393,7 +393,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
if (mm) {
const struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, ip);
if (vma) {
file = vma->vm_file;
@@ -405,7 +405,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
trace_seq_printf(s, "[+0x%lx]",
ip - vmstart);
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
trace_seq_printf(s, " <" IP_FMT ">", ip);
@@ -1416,7 +1416,7 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
if (flags & FAULT_FLAG_RETRY_NOWAIT)
return 0;
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (flags & FAULT_FLAG_KILLABLE)
wait_on_page_locked_killable(page);
else
@@ -1428,7 +1428,7 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
ret = __lock_page_killable(page);
if (ret) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return 0;
}
} else
@@ -2364,7 +2364,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
* mmap_sem here and return 0 if we don't have a fpin.
*/
if (*fpin == NULL)
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mm_read_unlock(vmf->vma->vm_mm);
return 0;
}
} else
@@ -48,7 +48,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
start = untagged_addr(start);
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
locked = 1;
vma = find_vma_intersection(mm, start, start + 1);
if (!vma) {
@@ -102,7 +102,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
out:
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (!ret)
ret = -EFAULT;
if (ret > 0)
@@ -982,7 +982,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
}
if (ret & VM_FAULT_RETRY) {
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
if (!(fault_flags & FAULT_FLAG_TRIED)) {
*unlocked = true;
fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
@@ -1068,7 +1068,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
*/
*locked = 1;
lock_dropped = true;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
pages, NULL, NULL);
if (ret != 1) {
@@ -1090,7 +1090,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
* We must let the caller know we temporarily dropped the lock
* and so the critical section protected by it was lost.
*/
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
*locked = 0;
}
return pages_done;
@@ -1208,7 +1208,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+ VM_BUG_ON_MM(!mm_is_locked(mm), mm);
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
if (vma->vm_flags & VM_LOCKONFAULT)
@@ -1260,7 +1260,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
*/
if (!locked) {
locked = 1;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, nstart);
} else if (nstart >= vma->vm_end)
vma = vma->vm_next;
@@ -1292,7 +1292,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
ret = 0;
}
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return ret; /* 0 or negative error code */
}
@@ -1698,11 +1698,11 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return -EINVAL;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
&locked, gup_flags | FOLL_TOUCH);
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -2380,11 +2380,11 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
* get_user_pages_unlocked() (see comments in that function)
*/
if (gup_flags & FOLL_LONGTERM) {
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
ret = __gup_longterm_locked(current, current->mm,
start, nr_pages,
pages, NULL, gup_flags);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
} else {
ret = get_user_pages_unlocked(start, nr_pages,
pages, gup_flags);
@@ -382,7 +382,7 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
FAULT_FLAG_ALLOW_RETRY) {
fpin = get_file(vmf->vma->vm_file);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mm_read_unlock(vmf->vma->vm_mm);
}
return fpin;
}
@@ -508,8 +508,8 @@ void __khugepaged_exit(struct mm_struct *mm)
* khugepaged has finished working on the pagetables
* under the mmap_sem.
*/
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
+ mm_write_lock(mm);
+ mm_write_unlock(mm);
}
}
@@ -918,7 +918,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
if (ret & VM_FAULT_RETRY) {
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
/* vma is no longer available, don't continue to swapin */
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
@@ -970,7 +970,7 @@ static void collapse_huge_page(struct mm_struct *mm,
* sync compaction, and we do not need to hold the mmap_sem during
* that. We will recheck the vma after taking it again in write mode.
*/
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
new_page = khugepaged_alloc_page(hpage, gfp, node);
if (!new_page) {
result = SCAN_ALLOC_HUGE_PAGE_FAIL;
@@ -982,11 +982,11 @@ static void collapse_huge_page(struct mm_struct *mm,
goto out_nolock;
}
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
result = hugepage_vma_revalidate(mm, address, &vma);
if (result) {
mem_cgroup_cancel_charge(new_page, memcg, true);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
goto out_nolock;
}
@@ -994,7 +994,7 @@ static void collapse_huge_page(struct mm_struct *mm,
if (!pmd) {
result = SCAN_PMD_NULL;
mem_cgroup_cancel_charge(new_page, memcg, true);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
goto out_nolock;
}
@@ -1005,17 +1005,17 @@ static void collapse_huge_page(struct mm_struct *mm,
*/
if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
mem_cgroup_cancel_charge(new_page, memcg, true);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
goto out_nolock;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* Prevent all access to pagetables with the exception of
* gup_fast later handled by the ptep_clear_flush and the VM
* handled by the anon_vma lock + PG_lock.
*/
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
result = SCAN_ANY_PROCESS;
if (!mmget_still_valid(mm))
goto out;
@@ -1103,7 +1103,7 @@ static void collapse_huge_page(struct mm_struct *mm,
khugepaged_pages_collapsed++;
result = SCAN_SUCCEED;
out_up_write:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
out_nolock:
trace_mm_collapse_huge_page(mm, isolated, result);
return;
@@ -1399,7 +1399,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
if (likely(mm_slot->nr_pte_mapped_thp == 0))
return 0;
- if (!down_write_trylock(&mm->mmap_sem))
+ if (!mm_write_trylock(mm))
return -EBUSY;
if (unlikely(khugepaged_test_exit(mm)))
@@ -1410,7 +1410,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
out:
mm_slot->nr_pte_mapped_thp = 0;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return 0;
}
@@ -1455,12 +1455,12 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* mmap_sem while holding page lock. Fault path does it in
* reverse order. Trylock is a way to avoid deadlock.
*/
- if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
+ if (mm_write_trylock(vma->vm_mm)) {
spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
/* assume page table is clear */
_pmd = pmdp_collapse_flush(vma, addr, pmd);
spin_unlock(ptl);
- up_write(&vma->vm_mm->mmap_sem);
+ mm_write_unlock(vma->vm_mm);
mm_dec_nr_ptes(vma->vm_mm);
pte_free(vma->vm_mm, pmd_pgtable(_pmd));
} else {
@@ -1947,7 +1947,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
* the next mm on the list.
*/
vma = NULL;
- if (unlikely(!down_read_trylock(&mm->mmap_sem)))
+ if (unlikely(!mm_read_trylock(mm)))
goto breakouterloop_mmap_sem;
if (likely(!khugepaged_test_exit(mm)))
vma = find_vma(mm, khugepaged_scan.address);
@@ -1994,7 +1994,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
&& !shmem_huge_enabled(vma))
goto skip;
file = get_file(vma->vm_file);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
ret = 1;
khugepaged_scan_file(mm, file, pgoff, hpage);
fput(file);
@@ -2014,7 +2014,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
}
}
breakouterloop:
- up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
+ mm_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:
spin_lock(&khugepaged_mm_lock);
@@ -542,11 +542,11 @@ static void break_cow(struct rmap_item *rmap_item)
*/
put_anon_vma(rmap_item->anon_vma);
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_mergeable_vma(mm, addr);
if (vma)
break_ksm(vma, addr);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
static struct page *get_mergeable_page(struct rmap_item *rmap_item)
@@ -556,7 +556,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
struct vm_area_struct *vma;
struct page *page;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_mergeable_vma(mm, addr);
if (!vma)
goto out;
@@ -572,7 +572,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
out:
page = NULL;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return page;
}
@@ -976,7 +976,7 @@ static int unmerge_and_remove_all_rmap_items(void)
for (mm_slot = ksm_scan.mm_slot;
mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
mm = mm_slot->mm;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (ksm_test_exit(mm))
break;
@@ -989,7 +989,7 @@ static int unmerge_and_remove_all_rmap_items(void)
}
remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -1012,7 +1012,7 @@ static int unmerge_and_remove_all_rmap_items(void)
return 0;
error:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = &ksm_mm_head;
spin_unlock(&ksm_mmlist_lock);
@@ -1280,7 +1280,7 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
struct vm_area_struct *vma;
int err = -EFAULT;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_mergeable_vma(mm, rmap_item->address);
if (!vma)
goto out;
@@ -1296,7 +1296,7 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
rmap_item->anon_vma = vma->anon_vma;
get_anon_vma(vma->anon_vma);
out:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return err;
}
@@ -2110,11 +2110,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
if (ksm_use_zero_pages && (checksum == zero_checksum)) {
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_mergeable_vma(mm, rmap_item->address);
err = try_to_merge_one_page(vma, page,
ZERO_PAGE(rmap_item->address));
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* In case of failure, the page was not really empty, so we
* need to continue. Otherwise we're done.
@@ -2277,7 +2277,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
}
mm = slot->mm;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
if (ksm_test_exit(mm))
vma = NULL;
else
@@ -2311,7 +2311,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
ksm_scan.address += PAGE_SIZE;
} else
put_page(*page);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return rmap_item;
}
put_page(*page);
@@ -2349,10 +2349,10 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
free_mm_slot(slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
mmdrop(mm);
} else {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
/*
* up_read(&mm->mmap_sem) first because after
* spin_unlock(&ksm_mmlist_lock) run, the "mm" may
@@ -2552,8 +2552,8 @@ void __ksm_exit(struct mm_struct *mm)
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mmdrop(mm);
} else if (mm_slot) {
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
+ mm_write_lock(mm);
+ mm_write_unlock(mm);
}
}
@@ -288,12 +288,12 @@ static long madvise_willneed(struct vm_area_struct *vma,
*/
*prev = NULL; /* tell sys_madvise we drop mmap_sem */
get_file(file);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
fput(file);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
return 0;
}
@@ -763,7 +763,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
if (!userfaultfd_remove(vma, start, end)) {
*prev = NULL; /* mmap_sem has been dropped, prev is stale */
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma(current->mm, start);
if (!vma)
return -ENOMEM;
@@ -845,13 +845,13 @@ static long madvise_remove(struct vm_area_struct *vma,
get_file(f);
if (userfaultfd_remove(vma, start, end)) {
/* mmap_sem was not released by userfaultfd_remove() */
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
}
error = vfs_fallocate(f,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, end - start);
fput(f);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
return error;
}
@@ -1082,10 +1082,10 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
write = madvise_need_mmap_write(behavior);
if (write) {
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mm_write_lock_killable(current->mm))
return -EINTR;
} else {
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
}
/*
@@ -1135,9 +1135,9 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
out:
blk_finish_plug(&plug);
if (write)
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
else
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return error;
}
@@ -5533,9 +5533,9 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
unsigned long precharge;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
precharge = mc.precharge;
mc.precharge = 0;
@@ -5818,7 +5818,7 @@ static void mem_cgroup_move_charge(void)
atomic_inc(&mc.from->moving_account);
synchronize_rcu();
retry:
- if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+ if (unlikely(!mm_read_trylock(mc.mm))) {
/*
* Someone who are holding the mmap_sem might be waiting in
* waitq. So we cancel all extra charges, wake up all waiters,
@@ -5837,7 +5837,7 @@ static void mem_cgroup_move_charge(void)
walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
NULL);
- up_read(&mc.mm->mmap_sem);
+ mm_read_unlock(mc.mm);
atomic_dec(&mc.from->moving_account);
}
@@ -1202,7 +1202,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
next = pud_addr_end(addr, end);
if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
if (next - addr != HPAGE_PUD_SIZE) {
- VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+ VM_BUG_ON_VMA(!mm_is_locked(tlb->mm), vma);
split_huge_pud(vma, pud, addr);
} else if (zap_huge_pud(tlb, vma, pud, addr))
goto next;
@@ -1507,7 +1507,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
- BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+ BUG_ON(mm_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vma->vm_flags |= VM_MIXEDMAP;
}
@@ -4447,7 +4447,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
void *old_buf = buf;
int write = gup_flags & FOLL_WRITE;
- if (down_read_killable(&mm->mmap_sem))
+ if (mm_read_lock_killable(mm))
return 0;
/* ignore errors, just check how much was successfully transferred */
@@ -4498,7 +4498,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
buf += bytes;
addr += bytes;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return buf - old_buf;
}
@@ -4555,7 +4555,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
/*
* we might be running from an atomic context so we cannot sleep
*/
- if (!down_read_trylock(&mm->mmap_sem))
+ if (!mm_read_trylock(mm))
return;
vma = find_vma(mm, ip);
@@ -4574,7 +4574,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
free_page((unsigned long)buf);
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
@@ -379,10 +379,10 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next)
mpol_rebind_policy(vma->vm_policy, new);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
@@ -878,7 +878,7 @@ static int lookup_node(struct mm_struct *mm, unsigned long addr)
put_page(p);
}
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return err;
}
@@ -911,10 +911,10 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
* vma/shared policy at addr is NULL. We
* want to return MPOL_DEFAULT in this case.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma_intersection(mm, addr, addr+1);
if (!vma) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return -EFAULT;
}
if (vma->vm_ops && vma->vm_ops->get_policy)
@@ -973,7 +973,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
out:
mpol_cond_put(pol);
if (vma)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (pol_refcount)
mpol_put(pol_refcount);
return err;
@@ -1082,7 +1082,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
if (err)
return err;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
/*
* Find a 'source' bit set in 'tmp' whose corresponding 'dest'
@@ -1163,7 +1163,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
if (err < 0)
break;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (err < 0)
return err;
return busy;
@@ -1286,12 +1286,12 @@ static long do_mbind(unsigned long start, unsigned long len,
{
NODEMASK_SCRATCH(scratch);
if (scratch) {
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
task_lock(current);
err = mpol_set_nodemask(new, nmask, scratch);
task_unlock(current);
if (err)
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
} else
err = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
@@ -1328,7 +1328,7 @@ static long do_mbind(unsigned long start, unsigned long len,
putback_movable_pages(&pagelist);
}
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
mpol_out:
mpol_put(new);
return err;
@@ -1526,7 +1526,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
unsigned int follflags;
int err;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
err = -EFAULT;
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start || !vma_migratable(vma))
@@ -1579,7 +1579,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
*/
put_page(page);
out:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return err;
}
@@ -1690,7 +1690,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
{
unsigned long i;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (i = 0; i < nr_pages; i++) {
unsigned long addr = (unsigned long)(*pages);
@@ -1717,7 +1717,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
status++;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
}
/*
@@ -283,9 +283,9 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
* Do at most PAGE_SIZE entries per iteration, due to
* the temporary buffer size.
*/
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (retval <= 0)
break;
@@ -686,7 +686,7 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
lock_limit >>= PAGE_SHIFT;
locked = len >> PAGE_SHIFT;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mm_write_lock_killable(current->mm))
return -EINTR;
locked += current->mm->locked_vm;
@@ -705,7 +705,7 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = apply_vma_lock_flags(start, len, flags);
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
if (error)
return error;
@@ -742,10 +742,10 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mm_write_lock_killable(current->mm))
return -EINTR;
ret = apply_vma_lock_flags(start, len, 0);
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
return ret;
}
@@ -811,14 +811,14 @@ SYSCALL_DEFINE1(mlockall, int, flags)
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mm_write_lock_killable(current->mm))
return -EINTR;
ret = -ENOMEM;
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = apply_mlockall_flags(flags);
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
if (!ret && (flags & MCL_CURRENT))
mm_populate(0, TASK_SIZE);
@@ -829,10 +829,10 @@ SYSCALL_DEFINE0(munlockall)
{
int ret;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mm_write_lock_killable(current->mm))
return -EINTR;
ret = apply_mlockall_flags(0);
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
return ret;
}
@@ -197,7 +197,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
brk = untagged_addr(brk);
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
origbrk = mm->brk;
@@ -271,9 +271,9 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
success:
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
if (downgraded)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
else
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
@@ -281,7 +281,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
out:
retval = origbrk;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return retval;
}
@@ -2812,7 +2812,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
detach_vmas_to_be_unmapped(mm, vma, prev, end);
if (downgrade)
- downgrade_write(&mm->mmap_sem);
+ mm_downgrade_write_lock(mm);
unmap_region(mm, vma, prev, start, end);
@@ -2834,7 +2834,7 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
struct mm_struct *mm = current->mm;
LIST_HEAD(uf);
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
ret = __do_munmap(mm, start, len, &uf, downgrade);
@@ -2844,10 +2844,10 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
* it to 0 before return.
*/
if (ret == 1) {
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
ret = 0;
} else
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
return ret;
@@ -2895,7 +2895,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
vma = find_vma(mm, start);
@@ -2958,7 +2958,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
prot, flags, pgoff, &populate, NULL);
fput(file);
out:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
if (populate)
mm_populate(ret, populate);
if (!IS_ERR_VALUE(ret))
@@ -3058,12 +3058,12 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (!len)
return 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
ret = do_brk_flags(addr, len, flags, &uf);
populate = ((mm->def_flags & VM_LOCKED) != 0);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate && !ret)
mm_populate(addr, len);
@@ -3107,8 +3107,8 @@ void exit_mmap(struct mm_struct *mm)
(void)__oom_reap_task_mm(mm);
set_bit(MMF_OOM_SKIP, &mm->flags);
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
+ mm_write_lock(mm);
+ mm_write_unlock(mm);
}
if (mm->locked_vm) {
@@ -3532,7 +3532,7 @@ int mm_take_all_locks(struct mm_struct *mm)
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(down_read_trylock(&mm->mmap_sem));
+ BUG_ON(mm_read_trylock(mm));
mutex_lock(&mm_all_locks_mutex);
@@ -3612,7 +3612,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(down_read_trylock(&mm->mmap_sem));
+ BUG_ON(mm_read_trylock(mm));
BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -665,9 +665,9 @@ int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
int ret;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
ret = __mmu_notifier_register(mn, mm);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
@@ -478,7 +478,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
reqprot = prot;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mm_write_lock_killable(current->mm))
return -EINTR;
/*
@@ -568,7 +568,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
prot = reqprot;
}
out:
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
return error;
}
@@ -598,7 +598,7 @@ SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
if (init_val & ~PKEY_ACCESS_MASK)
return -EINVAL;
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
pkey = mm_pkey_alloc(current->mm);
ret = -ENOSPC;
@@ -612,7 +612,7 @@ SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
}
ret = pkey;
out:
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
return ret;
}
@@ -620,9 +620,9 @@ SYSCALL_DEFINE1(pkey_free, int, pkey)
{
int ret;
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
ret = mm_pkey_free(current->mm, pkey);
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
/*
* We could provie warnings or errors if any VMA still
@@ -629,7 +629,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
if (!new_len)
return ret;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mm_write_lock_killable(current->mm))
return -EINTR;
if (flags & MREMAP_FIXED) {
@@ -720,9 +720,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
locked = 0;
}
if (downgraded)
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
else
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
userfaultfd_unmap_complete(mm, &uf_unmap_early);
@@ -57,7 +57,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
* If the interval [start,end) covers some unmapped address ranges,
* just ignore them, but return -ENOMEM at the end.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, start);
for (;;) {
struct file *file;
@@ -88,12 +88,12 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
if ((flags & MS_SYNC) && file &&
(vma->vm_flags & VM_SHARED)) {
get_file(file);
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
error = vfs_fsync_range(file, fstart, fend, 1);
fput(file);
if (error || start >= end)
goto out;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
vma = find_vma(mm, start);
} else {
if (start >= end) {
@@ -104,7 +104,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
}
}
out_unlock:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
out:
return error ? : unmapped_error;
}
@@ -163,11 +163,11 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
if (ret) {
struct vm_area_struct *vma;
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)ret);
if (vma)
vma->vm_flags |= VM_USERMAP;
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
}
return ret;
@@ -1548,9 +1548,9 @@ int vm_munmap(unsigned long addr, size_t len)
struct mm_struct *mm = current->mm;
int ret;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
ret = do_munmap(mm, addr, len, NULL);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL(vm_munmap);
@@ -1637,9 +1637,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
{
unsigned long ret;
- down_write(&current->mm->mmap_sem);
+ mm_write_lock(current->mm);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
+ mm_write_unlock(current->mm);
return ret;
}
@@ -1711,7 +1711,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct *vma;
int write = gup_flags & FOLL_WRITE;
- if (down_read_killable(&mm->mmap_sem))
+ if (mm_read_lock_killable(mm))
return 0;
/* the access must start within one of the target process's mappings */
@@ -1734,7 +1734,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
len = 0;
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return len;
}
@@ -568,7 +568,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
bool ret = true;
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm)) {
trace_skip_task_reaping(tsk->pid);
return false;
}
@@ -599,7 +599,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
out_finish:
trace_finish_task_reaping(tsk->pid);
out_unlock:
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return ret;
}
@@ -105,11 +105,11 @@ static int process_vm_rw_single_vec(unsigned long addr,
* access remotely because task/mm might not
* current/current->mm
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
pages = get_user_pages_remote(task, mm, pa, pages, flags,
process_pages, NULL, &locked);
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
if (pages <= 0)
return -EFAULT;
@@ -2070,7 +2070,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type,
struct vm_area_struct *vma;
int ret = 0;
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma) {
ret = unuse_vma(vma, type, frontswap,
@@ -2080,7 +2080,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type,
}
cond_resched();
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
return ret;
}
@@ -226,7 +226,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
* feature is not supported.
*/
if (zeropage) {
- up_read(&dst_mm->mmap_sem);
+ mm_read_unlock(dst_mm);
return -EINVAL;
}
@@ -306,7 +306,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
cond_resched();
if (unlikely(err == -ENOENT)) {
- up_read(&dst_mm->mmap_sem);
+ mm_read_unlock(dst_mm);
BUG_ON(!page);
err = copy_huge_page_from_user(page,
@@ -317,7 +317,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
err = -EFAULT;
goto out;
}
- down_read(&dst_mm->mmap_sem);
+ mm_read_lock(dst_mm);
dst_vma = NULL;
goto retry;
@@ -337,7 +337,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
}
out_unlock:
- up_read(&dst_mm->mmap_sem);
+ mm_read_unlock(dst_mm);
out:
if (page) {
/*
@@ -471,7 +471,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
copied = 0;
page = NULL;
retry:
- down_read(&dst_mm->mmap_sem);
+ mm_read_lock(dst_mm);
/*
* If memory mappings are changing because of non-cooperative
@@ -561,7 +561,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
if (unlikely(err == -ENOENT)) {
void *page_kaddr;
- up_read(&dst_mm->mmap_sem);
+ mm_read_unlock(dst_mm);
BUG_ON(!page);
page_kaddr = kmap(page);
@@ -590,7 +590,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
}
out_unlock:
- up_read(&dst_mm->mmap_sem);
+ mm_read_unlock(dst_mm);
out:
if (page)
put_page(page);
@@ -481,10 +481,10 @@ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
if (pages == 0 || !mm)
return 0;
- down_write(&mm->mmap_sem);
+ mm_write_lock(mm);
ret = __account_locked_vm(mm, pages, inc, current,
capable(CAP_IPC_LOCK));
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
return ret;
}
@@ -501,11 +501,11 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
ret = security_mmap_file(file, prot, flag);
if (!ret) {
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm))
return -EINTR;
ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
&populate, &uf);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(ret, populate);
@@ -1755,7 +1755,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
sock_rps_record_flow(sk);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
ret = -EINVAL;
vma = find_vma(current->mm, address);
@@ -1817,7 +1817,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
frags++;
}
out:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (length) {
WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
@@ -290,10 +290,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
if (!umem->pgs)
return -ENOMEM;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
npgs = get_user_pages(umem->address, umem->npgs,
gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
if (npgs != umem->npgs) {
if (npgs >= 0) {
@@ -975,7 +975,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -983,7 +983,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1693,11 +1693,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
/* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return -EFAULT;
}
@@ -1719,7 +1719,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (vma_pagesize == PMD_SIZE ||
(vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2294,7 +2294,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -2353,7 +2353,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return ret;
}
@@ -74,11 +74,11 @@ static void async_pf_execute(struct work_struct *work)
* mm and might be done in another context, so we must
* access remotely.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm);
get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
&locked);
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm);
kvm_async_page_present_sync(vcpu, apf);
@@ -1417,7 +1417,7 @@ unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
if (kvm_is_error_hva(addr))
return PAGE_SIZE;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
vma = find_vma(current->mm, addr);
if (!vma)
goto out;
@@ -1425,7 +1425,7 @@ unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
size = vma_kernel_pagesize(vma);
out:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return size;
}
@@ -1680,7 +1680,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
if (npages == 1)
return pfn;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm);
if (npages == -EHWPOISON ||
(!async && check_user_page_hwpoison(addr))) {
pfn = KVM_PFN_ERR_HWPOISON;
@@ -1704,7 +1704,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
pfn = KVM_PFN_ERR_FAULT;
}
exit:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm);
return pfn;
}
This change converts the existing mmap_sem rwsem calls to use the new MM
locking API instead.

The change is generated using coccinelle with the following rules:

// spatch --sp-file foo.cocci --in-place --include-headers --dir .

@@
expression mm;
@@
-init_rwsem(&mm->mmap_sem)
+mm_init_lock(mm)

@@
expression mm;
@@
-down_write(&mm->mmap_sem)
+mm_write_lock(mm)

@@
expression mm;
@@
-down_write_killable(&mm->mmap_sem)
+mm_write_lock_killable(mm)

@@
expression mm;
@@
-down_write_trylock(&mm->mmap_sem)
+mm_write_trylock(mm)

@@
expression mm;
@@
-up_write(&mm->mmap_sem)
+mm_write_unlock(mm)

@@
expression mm;
@@
-downgrade_write(&mm->mmap_sem)
+mm_downgrade_write_lock(mm)

@@
expression mm;
@@
-down_read(&mm->mmap_sem)
+mm_read_lock(mm)

@@
expression mm;
@@
-down_read_killable(&mm->mmap_sem)
+mm_read_lock_killable(mm)

@@
expression mm;
@@
-down_read_trylock(&mm->mmap_sem)
+mm_read_trylock(mm)

@@
expression mm;
@@
-up_read(&mm->mmap_sem)
+mm_read_unlock(mm)

@@
expression mm;
@@
-rwsem_is_locked(&mm->mmap_sem)
+mm_is_locked(mm)

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 arch/alpha/kernel/traps.c | 4 +-
 arch/alpha/mm/fault.c | 10 ++---
 arch/arc/kernel/process.c | 4 +-
 arch/arc/kernel/troubleshoot.c | 4 +-
 arch/arc/mm/fault.c | 4 +-
 arch/arm/kernel/process.c | 4 +-
 arch/arm/kernel/swp_emulate.c | 4 +-
 arch/arm/lib/uaccess_with_memcpy.c | 16 ++++----
 arch/arm/mm/fault.c | 6 +--
 arch/arm64/kernel/traps.c | 4 +-
 arch/arm64/kernel/vdso.c | 8 ++--
 arch/arm64/mm/fault.c | 8 ++--
 arch/csky/kernel/vdso.c | 4 +-
 arch/csky/mm/fault.c | 8 ++--
 arch/hexagon/kernel/vdso.c | 4 +-
 arch/hexagon/mm/vm_fault.c | 8 ++--
 arch/ia64/kernel/perfmon.c | 8 ++--
 arch/ia64/mm/fault.c | 8 ++--
 arch/ia64/mm/init.c | 12 +++---
 arch/m68k/kernel/sys_m68k.c | 14 +++----
 arch/m68k/mm/fault.c | 8 ++--
 arch/microblaze/mm/fault.c | 12 +++---
 arch/mips/kernel/traps.c | 4 +-
 arch/mips/kernel/vdso.c | 4 +-
 arch/nds32/kernel/vdso.c | 6 +--
 arch/nds32/mm/fault.c | 12 +++---
 arch/nios2/mm/fault.c | 12 +++---
 arch/nios2/mm/init.c | 4 +-
 arch/openrisc/mm/fault.c | 10 ++---
 arch/parisc/kernel/traps.c | 6 +--
 arch/parisc/mm/fault.c | 8 ++--
 arch/powerpc/kernel/vdso.c | 6 +--
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 4 +-
 arch/powerpc/kvm/book3s_hv.c | 6 +--
 arch/powerpc/kvm/book3s_hv_uvmem.c | 12 +++---
 arch/powerpc/kvm/e500_mmu_host.c | 4 +-
 arch/powerpc/mm/book3s64/iommu_api.c | 4 +-
 arch/powerpc/mm/book3s64/subpage_prot.c | 12 +++---
 arch/powerpc/mm/copro_fault.c | 4 +-
 arch/powerpc/mm/fault.c | 12 +++---
 arch/powerpc/oprofile/cell/spu_task_sync.c | 6 +--
 arch/powerpc/platforms/cell/spufs/file.c | 4 +-
 arch/riscv/kernel/vdso.c | 4 +-
 arch/riscv/mm/fault.c | 10 ++---
 arch/s390/kernel/vdso.c | 4 +-
 arch/s390/kvm/gaccess.c | 4 +-
 arch/s390/kvm/kvm-s390.c | 24 +++++------
 arch/s390/kvm/priv.c | 32 +++++++--------
 arch/s390/mm/fault.c | 6 +--
 arch/s390/mm/gmap.c | 40 +++++++++----------
 arch/s390/pci/pci_mmio.c | 4 +-
 arch/sh/kernel/sys_sh.c | 6 +--
 arch/sh/kernel/vsyscall/vsyscall.c | 4 +-
 arch/sh/mm/fault.c | 14 +++----
 arch/sparc/mm/fault_32.c | 18 ++++-----
 arch/sparc/mm/fault_64.c | 12 +++---
 arch/sparc/vdso/vma.c | 4 +-
 arch/um/include/asm/mmu_context.h | 2 +-
 arch/um/kernel/tlb.c | 2 +-
 arch/um/kernel/trap.c | 6 +--
 arch/unicore32/mm/fault.c | 6 +--
 arch/x86/entry/vdso/vma.c | 10 ++---
 arch/x86/kernel/vm86_32.c | 4 +-
 arch/x86/mm/debug_pagetables.c | 8 ++--
 arch/x86/mm/fault.c | 8 ++--
 arch/x86/mm/mpx.c | 12 +++---
 arch/x86/um/vdso/vma.c | 4 +-
 arch/xtensa/mm/fault.c | 10 ++---
 drivers/android/binder_alloc.c | 6 +--
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 ++---
 drivers/gpu/drm/amd/amdkfd/kfd_events.c | 4 +-
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 4 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 8 ++--
 drivers/gpu/drm/nouveau/nouveau_svm.c | 20 +++++-----
 drivers/gpu/drm/radeon/radeon_cs.c | 4 +-
 drivers/gpu/drm/radeon/radeon_gem.c | 6 +--
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 4 +-
 drivers/infiniband/core/umem.c | 6 +--
 drivers/infiniband/core/umem_odp.c | 10 ++---
 drivers/infiniband/core/uverbs_main.c | 4 +-
 drivers/infiniband/hw/mlx4/mr.c | 4 +-
 drivers/infiniband/hw/qib/qib_user_pages.c | 6 +--
 drivers/infiniband/hw/usnic/usnic_uiom.c | 4 +-
 drivers/infiniband/sw/siw/siw_mem.c | 4 +-
 drivers/iommu/amd_iommu_v2.c | 4 +-
 drivers/iommu/intel-svm.c | 4 +-
 drivers/media/v4l2-core/videobuf-core.c | 4 +-
 drivers/media/v4l2-core/videobuf-dma-contig.c | 4 +-
 drivers/media/v4l2-core/videobuf-dma-sg.c | 4 +-
 drivers/misc/cxl/cxllib.c | 4 +-
 drivers/misc/cxl/fault.c | 4 +-
 drivers/misc/sgi-gru/grufault.c | 16 ++++----
 drivers/misc/sgi-gru/grufile.c | 4 +-
 drivers/oprofile/buffer_sync.c | 10 ++---
 drivers/staging/kpc2000/kpc_dma/fileops.c | 4 +-
 drivers/tee/optee/call.c | 4 +-
 drivers/vfio/vfio_iommu_type1.c | 12 +++---
 drivers/xen/gntdev.c | 4 +-
 drivers/xen/privcmd.c | 14 +++----
 fs/aio.c | 4 +-
 fs/coredump.c | 4 +-
 fs/exec.c | 16 ++++----
 fs/io_uring.c | 4 +-
 fs/proc/base.c | 12 +++---
 fs/proc/task_mmu.c | 28 ++++++-------
 fs/proc/task_nommu.c | 18 ++++-----
 fs/userfaultfd.c | 28 ++++++-------
 include/linux/mmu_notifier.h | 5 ++-
 ipc/shm.c | 8 ++--
 kernel/acct.c | 4 +-
 kernel/bpf/stackmap.c | 4 +-
 kernel/events/core.c | 4 +-
 kernel/events/uprobes.c | 16 ++++----
 kernel/exit.c | 8 ++--
 kernel/fork.c | 12 +++---
 kernel/futex.c | 4 +-
 kernel/sched/fair.c | 4 +-
 kernel/sys.c | 18 ++++-----
 kernel/trace/trace_output.c | 4 +-
 mm/filemap.c | 6 +--
 mm/frame_vector.c | 4 +-
 mm/gup.c | 20 +++++-----
 mm/internal.h | 2 +-
 mm/khugepaged.c | 36 ++++++++---------
 mm/ksm.c | 34 ++++++++--------
 mm/madvise.c | 18 ++++-----
 mm/memcontrol.c | 8 ++--
 mm/memory.c | 12 +++---
 mm/mempolicy.c | 22 +++++-----
 mm/migrate.c | 8 ++--
 mm/mincore.c | 4 +-
 mm/mlock.c | 16 ++++----
 mm/mmap.c | 32 +++++++--------
 mm/mmu_notifier.c | 4 +-
 mm/mprotect.c | 12 +++---
 mm/mremap.c | 6 +--
 mm/msync.c | 8 ++--
 mm/nommu.c | 16 ++++----
 mm/oom_kill.c | 4 +-
 mm/process_vm_access.c | 4 +-
 mm/swapfile.c | 4 +-
 mm/userfaultfd.c | 14 +++---
 mm/util.c | 8 ++--
 net/ipv4/tcp.c | 4 +-
 net/xdp/xdp_umem.c | 4 +-
 virt/kvm/arm/mmu.c | 14 +++---
 virt/kvm/async_pf.c | 4 +-
 virt/kvm/kvm_main.c | 8 ++--
 149 files changed, 653 insertions(+), 652 deletions(-)
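For reference, below is a minimal sketch of what the new MM locking API is
assumed to look like: thin inline wrappers around the existing mmap_sem rwsem,
using only the names produced by the conversion above. The real definitions
are introduced elsewhere in this series and may differ; this sketch only
illustrates why the coccinelle conversion is a mechanical, no-functional-change
rename.

/*
 * Sketch only (not part of this patch): assumed wrapper definitions matching
 * the names used by the conversion above. Each wrapper simply forwards to the
 * corresponding mmap_sem rwsem primitive.
 */
#include <linux/mm_types.h>
#include <linux/rwsem.h>

static inline void mm_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_sem);
}

static inline void mm_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
}

static inline int mm_write_lock_killable(struct mm_struct *mm)
{
	return down_write_killable(&mm->mmap_sem);
}

static inline bool mm_write_trylock(struct mm_struct *mm)
{
	return down_write_trylock(&mm->mmap_sem) != 0;
}

static inline void mm_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_sem);
}

static inline void mm_downgrade_write_lock(struct mm_struct *mm)
{
	downgrade_write(&mm->mmap_sem);
}

static inline void mm_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline int mm_read_lock_killable(struct mm_struct *mm)
{
	return down_read_killable(&mm->mmap_sem);
}

static inline bool mm_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mm_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}

static inline bool mm_is_locked(struct mm_struct *mm)
{
	return rwsem_is_locked(&mm->mmap_sem) != 0;
}

Because every wrapper is a one-line inline forwarding call, the converted call
sites generate the same code as before; a later change can then switch the
underlying lock implementation in a single place without touching callers
again.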