@@ -299,11 +299,11 @@ static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem)
*
* We must take lli_trunc_sem in read mode on entry in to various i/o paths
* in Lustre, in order to exclude truncates. Some of these paths then need to
- * take the mmap_sem, while still holding the trunc_sem. The problem is that
- * page faults hold the mmap_sem when calling in to Lustre, and then must also
+ * take the mmap_lock, while still holding the trunc_sem. The problem is that
+ * page faults hold the mmap_lock when calling in to Lustre, and then must also
* take the trunc_sem to exclude truncate.
*
- * This means the locking order for trunc_sem and mmap_sem is sometimes AB,
+ * This means the locking order for trunc_sem and mmap_lock is sometimes AB,
* sometimes BA. This is almost OK because in both cases, we take the trunc
* sem for read, so it doesn't block.
*
@@ -313,9 +313,9 @@ static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem)
*
* So we have, on our truncate sem, in order (where 'reader' and 'writer' refer
* to the mode in which they take the semaphore):
- * reader (holding mmap_sem, needs truncate_sem)
+ * reader (holding mmap_lock, needs truncate_sem)
* writer
- * reader (holding truncate sem, waiting for mmap_sem)
+ * reader (holding truncate sem, waiting for mmap_lock)
*
* And so the readers deadlock.
*
@@ -325,7 +325,7 @@ static inline void ll_trunc_sem_init(struct ll_trunc_sem *sem)
* of the order they arrived in.
*
* down_read_nowait is only used in the page fault case, where we already hold
- * the mmap_sem. This is because otherwise repeated read and write operations
+ * the mmap_lock. This is because otherwise repeated read and write operations
* (which take the truncate sem) could prevent a truncate from ever starting.
* This could still happen with page faults, but without an even more complex
* mechanism, this is unavoidable.
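To make the asymmetry in that comment concrete, the behaviour can be sketched with two atomic counters and wait_var_event(). This is a simplified illustration of the scheme the comment describes, not the actual Lustre ll_trunc_sem helpers:

#include <linux/atomic.h>
#include <linux/wait_bit.h>

struct trunc_sem_sketch {
        atomic_t        readers;        /* >= 0: reader count, -1: writer */
        atomic_t        waiters;        /* writers queued on the semaphore */
};

static void sketch_down_read(struct trunc_sem_sketch *sem)
{
        /* Ordinary readers also yield to *waiting* writers, so a stream
         * of reads/writes cannot starve a truncate forever. */
        wait_var_event(&sem->readers,
                       atomic_read(&sem->waiters) == 0 &&
                       atomic_inc_unless_negative(&sem->readers));
}

static void sketch_down_read_nowait(struct trunc_sem_sketch *sem)
{
        /* The page-fault path (already holding mmap_lock) only waits for
         * an *active* writer; it never queues behind a pending writer,
         * which is what breaks the reader-writer-reader deadlock above. */
        wait_var_event(&sem->readers,
                       atomic_inc_unless_negative(&sem->readers));
}

static void sketch_up_read(struct trunc_sem_sketch *sem)
{
        if (atomic_dec_return(&sem->readers) == 0 &&
            atomic_read(&sem->waiters))
                wake_up_var(&sem->readers);
}

static void sketch_down_write(struct trunc_sem_sketch *sem)
{
        atomic_inc(&sem->waiters);
        wait_var_event(&sem->readers,
                       atomic_cmpxchg(&sem->readers, 0, -1) == 0);
        atomic_dec(&sem->waiters);
}

static void sketch_up_write(struct trunc_sem_sketch *sem)
{
        atomic_set(&sem->readers, 0);
        wake_up_var(&sem->readers);
}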
@@ -63,8 +63,8 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
{
struct vm_area_struct *vma, *ret = NULL;
- /* mmap_sem must have been held by caller. */
- LASSERT(!down_write_trylock(&mm->mmap_sem));
+ /* mmap_lock must have been held by caller. */
+ LASSERT(!mmap_write_trylock(mm));
for (vma = find_vma(mm, addr);
vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
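The converted assertion still encodes the same calling convention: our_vma() may only be entered with mmap_lock already held. A minimal, hypothetical caller just to show that discipline (range_is_mapped() is not a real Lustre function):

#include <linux/mm.h>

static bool range_is_mapped(struct mm_struct *mm, unsigned long addr,
                            size_t count)
{
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = our_vma(mm, addr, count); /* satisfies the LASSERT above */
        mmap_read_unlock(mm);

        return vma != NULL;
}

The real caller is vvp_mmap_locks() further down, which walks every VMA in the range under the same read lock.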
@@ -288,11 +288,11 @@ static vm_fault_t __ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
bool allow_retry = vmf->flags & FAULT_FLAG_ALLOW_RETRY;
bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;
- /* To avoid loops, instruct downstream to not drop mmap_sem */
+ /* To avoid loops, instruct downstream to not drop mmap_lock */
/**
* only need FAULT_FLAG_ALLOW_RETRY prior to Linux 5.1
* (6b4c9f4469819), where FAULT_FLAG_RETRY_NOWAIT is enough
- * to not drop mmap_sem when failed to lock the page.
+ * to not drop mmap_lock when failed to lock the page.
*/
vmf->flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
ll_cl_add(inode, env, NULL, LCC_MMAP);
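The hunk only shows the flags being forced on; the context lines save the caller's original retry flags first, presumably so they can be put back after the lower layers return. A sketch of that save/force/restore shape, where do_lower_fault() is a hypothetical stand-in for the lower-layer fault call:

        vm_fault_t result;
        bool allow_retry = vmf->flags & FAULT_FLAG_ALLOW_RETRY;
        bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;

        /* Force NOWAIT so lower layers keep mmap_lock across the fault. */
        vmf->flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;

        result = do_lower_fault(vmf);   /* hypothetical lower-layer call */

        /* Restore only what the caller originally asked for. */
        if (!has_retry)
                vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;
        if (!allow_retry)
                vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;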
@@ -1906,7 +1906,7 @@ vm_fault_t pcc_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
"%s: PCC backend fs not support ->page_mkwrite()\n",
ll_i2sbi(inode)->ll_fsname);
pcc_ioctl_detach(inode, PCC_DETACH_OPT_UNCACHE);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
*cached = true;
return VM_FAULT_RETRY | VM_FAULT_NOPAGE;
}
@@ -1933,7 +1933,7 @@ vm_fault_t pcc_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
*/
if (page->mapping == pcc_file->f_mapping) {
*cached = true;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return VM_FAULT_RETRY | VM_FAULT_NOPAGE;
}
@@ -1947,7 +1947,7 @@ vm_fault_t pcc_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_PCC_DETACH_MKWRITE)) {
pcc_io_fini(inode);
pcc_ioctl_detach(inode, PCC_DETACH_OPT_UNCACHE);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return VM_FAULT_RETRY | VM_FAULT_NOPAGE;
}
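The three pcc_page_mkwrite() hunks above, like the rest of the patch, are mechanical conversions to the wrappers introduced in include/linux/mmap_lock.h (v5.8), which also renamed mm->mmap_sem to mm->mmap_lock:

        down_read(&mm->mmap_sem)            ->  mmap_read_lock(mm)
        up_read(&mm->mmap_sem)              ->  mmap_read_unlock(mm)
        down_write(&mm->mmap_sem)           ->  mmap_write_lock(mm)
        up_write(&mm->mmap_sem)             ->  mmap_write_unlock(mm)
        down_write_trylock(&mm->mmap_sem)   ->  mmap_write_trylock(mm)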
@@ -462,7 +462,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
count += addr & (~PAGE_MASK);
addr &= PAGE_MASK;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
while ((vma = our_vma(mm, addr, count)) != NULL) {
struct inode *inode = file_inode(vma->vm_file);
int flags = CEF_MUST;
@@ -503,7 +503,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
count -= vma->vm_end - addr;
addr = vma->vm_end;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (result < 0)
break;
}
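vvp_mmap_locks() is exactly the usage our_vma() asserts about: the whole VMA walk runs under the read side of mmap_lock. Stripped of the Lustre-specific lock matching (the real loop requests a CEF_MUST cl lock per mapped file region), the shape is roughly the following simplified sketch; walk_mapped_range() is hypothetical, and the vma->vm_next iteration assumes a kernel from before the maple-tree conversion, as the diff itself does:

#include <linux/mm.h>

static void walk_mapped_range(struct mm_struct *mm, unsigned long addr,
                              size_t count)
{
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        for (vma = find_vma(mm, addr);
             vma && vma->vm_start < addr + count;
             vma = vma->vm_next) {
                /* inspect file_inode(vma->vm_file), vma->vm_flags, ... */
        }
        mmap_read_unlock(mm);
}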