--- a/fs/aio.c
+++ b/fs/aio.c
@@ -460,7 +460,6 @@ static const struct address_space_operations aio_ctx_aops = {
static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
struct aio_ring *ring;
- struct mm_struct *mm = current->mm;
unsigned long size, unused;
int nr_pages;
int i;
@@ -519,20 +518,13 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_size = nr_pages * PAGE_SIZE;
pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
- if (mm_write_lock_killable(mm)) {
- ctx->mmap_size = 0;
- aio_free_ring(ctx);
- return -EINTR;
- }
-
ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_SHARED, 0, 0, true, &unused, NULL);
- mm_write_unlock(mm);
+ MAP_SHARED, 0, 0, false, &unused, NULL);
if (IS_ERR((void *)ctx->mmap_base)) {
ctx->mmap_size = 0;
aio_free_ring(ctx);
- return -ENOMEM;
+ return (ctx->mmap_base == -EINTR) ? -EINTR : -ENOMEM;
}
pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
--- a/mm/util.c
+++ b/mm/util.c
@@ -501,12 +501,10 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
ret = security_mmap_file(file, prot, flag);
if (!ret) {
- if (mm_write_lock_killable(mm))
- return -EINTR;
ret = do_mmap(file, addr, len, prot, flag, 0, pgoff,
- true, &populate, &uf);
- mm_write_unlock(mm);
- userfaultfd_unmap_complete(mm, &uf);
+ false, &populate, &uf);
+ if (ret != -EINTR)
+ userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(ret, populate);
}
Change vm_mmap_pgoff() and aio_setup_ring() to call do_mmap() with
locked=false. Moving the mmap_sem acquisition to within do_mmap()
enables it to acquire a fine-grained lock in the future.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 fs/aio.c  | 12 ++----------
 mm/util.c |  8 +++-----
 2 files changed, 5 insertions(+), 15 deletions(-)
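
[Editor's note: the body of do_mmap() is not part of this patch, so the sketch
below is only an illustration of the calling convention the hunks above assume.
The prototype is inferred from the call sites, the mm_write_lock_killable() /
mm_write_unlock() names are taken from the caller-side code being removed, and
__do_mmap() is a hypothetical stand-in for the existing mapping logic.]

/*
 * Illustrative sketch only -- not part of this patch.
 *
 * With locked == true the caller already holds mmap_sem for write (the
 * old convention).  With locked == false (the new convention used by
 * vm_mmap_pgoff() and aio_setup_ring() above), do_mmap() takes the lock
 * itself, which is what later allows it to switch to a finer-grained
 * lock without touching every caller.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
		      unsigned long len, unsigned long prot,
		      unsigned long flags, vm_flags_t vm_flags,
		      unsigned long pgoff, bool locked,
		      unsigned long *populate, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	unsigned long ret;

	/* Take mmap_sem killably, so callers can see -EINTR as above. */
	if (!locked && mm_write_lock_killable(mm))
		return -EINTR;

	ret = __do_mmap(file, addr, len, prot, flags, vm_flags, pgoff,
			populate, uf);	/* hypothetical locked helper */

	if (!locked)
		mm_write_unlock(mm);

	return ret;
}

[This also shows why the -EINTR checks are added in the callers: once do_mmap()
acquires the lock killably on their behalf, a fatal signal surfaces as an
-EINTR return value rather than as a failed caller-side lock attempt.]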