| Message ID | 1430281503.3711.27.camel@edumazet-glaptop2.roam.corp.google.com (mailing list archive) |
|---|---|
| State | New, archived |
On Tue, Apr 28, 2015 at 09:25:03PM -0700, Eric Dumazet wrote:

> @@ -553,11 +572,20 @@ void __fd_install(struct files_struct *files, unsigned int fd,
> 		struct file *file)
> {
> 	struct fdtable *fdt;
> -	spin_lock(&files->file_lock);
> -	fdt = files_fdtable(files);
> +
> +	rcu_read_lock_sched();
> +
> +	while (unlikely(files->resize_in_progress)) {
> +		rcu_read_unlock_sched();
> +		wait_event(files->resize_wait, !files->resize_in_progress);
> +		rcu_read_lock_sched();
> +	}
> +	/* coupled with smp_wmb() in expand_fdtable() */
> +	smp_rmb();
> +	fdt = rcu_dereference_sched(files->fdt);
> 	BUG_ON(fdt->fd[fd] != NULL);
> 	rcu_assign_pointer(fdt->fd[fd], file);
> -	spin_unlock(&files->file_lock);
> +	rcu_read_unlock_sched();

Umm... You've taken something that was safe to use in atomic contexts
and turned it into something that might wait for a GFP_KERNEL allocation;
what's to guarantee that no users get broken by that? At the very least,
you want to slap might_sleep() in there - the actual sleep is going to be
very rare, so it would be extremely hard to reproduce and debug.

AFAICS, all current in-tree users should be safe, but fd_install() is
exported, and quiet changes of that sort are rather antisocial. Generally
I don't give a damn about out-of-tree code, but this one is over the top.

I _think_ it's otherwise OK, but please add might_sleep() *AND* a note in
Documentation/filesystems/porting.
On Mon, 2015-06-22 at 03:32 +0100, Al Viro wrote:
> On Tue, Apr 28, 2015 at 09:25:03PM -0700, Eric Dumazet wrote:
>
> > @@ -553,11 +572,20 @@ void __fd_install(struct files_struct *files, unsigned int fd,
> > 		struct file *file)
> > {
> > 	struct fdtable *fdt;
> > -	spin_lock(&files->file_lock);
> > -	fdt = files_fdtable(files);
> > +
> > +	rcu_read_lock_sched();
> > +
> > +	while (unlikely(files->resize_in_progress)) {
> > +		rcu_read_unlock_sched();
> > +		wait_event(files->resize_wait, !files->resize_in_progress);
> > +		rcu_read_lock_sched();
> > +	}
> > +	/* coupled with smp_wmb() in expand_fdtable() */
> > +	smp_rmb();
> > +	fdt = rcu_dereference_sched(files->fdt);
> > 	BUG_ON(fdt->fd[fd] != NULL);
> > 	rcu_assign_pointer(fdt->fd[fd], file);
> > -	spin_unlock(&files->file_lock);
> > +	rcu_read_unlock_sched();
>
> Umm... You've taken something that was safe to use in atomic contexts
> and turned it into something that might wait for a GFP_KERNEL allocation;
> what's to guarantee that no users get broken by that? At the very least,
> you want to slap might_sleep() in there - the actual sleep is going to be
> very rare, so it would be extremely hard to reproduce and debug.
>
> AFAICS, all current in-tree users should be safe, but fd_install() is
> exported, and quiet changes of that sort are rather antisocial. Generally
> I don't give a damn about out-of-tree code, but this one is over the top.
>
> I _think_ it's otherwise OK, but please add might_sleep() *AND* a note in
> Documentation/filesystems/porting.

Good points. I am currently traveling and will address this asap.

Thanks
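For reference, the annotation Al asks for would be a one-line addition at the top of the new `__fd_install()`. This is only a sketch against the function as posted in the patch below, not the actual follow-up (none had been sent at this point in the thread); `might_sleep()` is placed before `rcu_read_lock_sched()` because sleeping inside a sched-RCU read-side section is forbidden:

```c
void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;

	/* The wait below is rare, so without this annotation an atomic-context
	 * caller would almost never trip over it; might_sleep() makes
	 * CONFIG_DEBUG_ATOMIC_SLEEP complain on every call instead.
	 */
	might_sleep();
	rcu_read_lock_sched();

	while (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		wait_event(files->resize_wait, !files->resize_in_progress);
		rcu_read_lock_sched();
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}
```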
```diff
diff --git a/fs/file.c b/fs/file.c
index 93c5f89c248b..17cef5e52f16 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -147,6 +147,13 @@ static int expand_fdtable(struct files_struct *files, int nr)
 	spin_unlock(&files->file_lock);
 	new_fdt = alloc_fdtable(nr);
+
+	/* make sure all __fd_install() have seen resize_in_progress
+	 * or have finished their rcu_read_lock_sched() section.
+	 */
+	if (atomic_read(&files->count) > 1)
+		synchronize_sched();
+
 	spin_lock(&files->file_lock);
 	if (!new_fdt)
 		return -ENOMEM;
@@ -158,21 +165,14 @@ static int expand_fdtable(struct files_struct *files, int nr)
 		__free_fdtable(new_fdt);
 		return -EMFILE;
 	}
-	/*
-	 * Check again since another task may have expanded the fd table while
-	 * we dropped the lock
-	 */
 	cur_fdt = files_fdtable(files);
-	if (nr >= cur_fdt->max_fds) {
-		/* Continue as planned */
-		copy_fdtable(new_fdt, cur_fdt);
-		rcu_assign_pointer(files->fdt, new_fdt);
-		if (cur_fdt != &files->fdtab)
-			call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
-	} else {
-		/* Somebody else expanded, so undo our attempt */
-		__free_fdtable(new_fdt);
-	}
+	BUG_ON(nr < cur_fdt->max_fds);
+	copy_fdtable(new_fdt, cur_fdt);
+	rcu_assign_pointer(files->fdt, new_fdt);
+	if (cur_fdt != &files->fdtab)
+		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
+	/* coupled with smp_rmb() in __fd_install() */
+	smp_wmb();
 	return 1;
 }
 
@@ -185,21 +185,38 @@ static int expand_fdtable(struct files_struct *files, int nr)
  * The files->file_lock should be held on entry, and will be held on exit.
  */
 static int expand_files(struct files_struct *files, int nr)
+	__releases(files->file_lock)
+	__acquires(files->file_lock)
 {
 	struct fdtable *fdt;
+	int expanded = 0;
 
+repeat:
 	fdt = files_fdtable(files);
 
 	/* Do we need to expand? */
 	if (nr < fdt->max_fds)
-		return 0;
+		return expanded;
 
 	/* Can we expand? */
 	if (nr >= sysctl_nr_open)
 		return -EMFILE;
 
+	if (unlikely(files->resize_in_progress)) {
+		spin_unlock(&files->file_lock);
+		expanded = 1;
+		wait_event(files->resize_wait, !files->resize_in_progress);
+		spin_lock(&files->file_lock);
+		goto repeat;
+	}
+
 	/* All good, so we try */
-	return expand_fdtable(files, nr);
+	files->resize_in_progress = true;
+	expanded = expand_fdtable(files, nr);
+	files->resize_in_progress = false;
+
+	wake_up_all(&files->resize_wait);
+	return expanded;
 }
 
 static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
@@ -256,6 +273,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 	atomic_set(&newf->count, 1);
 
 	spin_lock_init(&newf->file_lock);
+	newf->resize_in_progress = false;
+	init_waitqueue_head(&newf->resize_wait);
 	newf->next_fd = 0;
 	new_fdt = &newf->fdtab;
 	new_fdt->max_fds = NR_OPEN_DEFAULT;
@@ -553,11 +572,20 @@ void __fd_install(struct files_struct *files, unsigned int fd,
 		struct file *file)
 {
 	struct fdtable *fdt;
-	spin_lock(&files->file_lock);
-	fdt = files_fdtable(files);
+
+	rcu_read_lock_sched();
+
+	while (unlikely(files->resize_in_progress)) {
+		rcu_read_unlock_sched();
+		wait_event(files->resize_wait, !files->resize_in_progress);
+		rcu_read_lock_sched();
+	}
+	/* coupled with smp_wmb() in expand_fdtable() */
+	smp_rmb();
+	fdt = rcu_dereference_sched(files->fdt);
 	BUG_ON(fdt->fd[fd] != NULL);
 	rcu_assign_pointer(fdt->fd[fd], file);
-	spin_unlock(&files->file_lock);
+	rcu_read_unlock_sched();
 }
 
 void fd_install(unsigned int fd, struct file *file)
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 230f87bdf5ad..fbb88740634a 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -47,6 +47,9 @@ struct files_struct {
    * read mostly part
    */
 	atomic_t count;
+	bool resize_in_progress;
+	wait_queue_head_t resize_wait;
+
 	struct fdtable __rcu *fdt;
 	struct fdtable fdtab;
 	/*
```
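Why the patch needs both the `synchronize_sched()` call and the `smp_wmb()`/`smp_rmb()` pairing is easier to see in a user-space model of the handshake. The sketch below is only an analogue, with all names illustrative: C11 acquire/release stands in for the kernel barriers, an explicit reader count stands in for the RCU-sched grace period, and busy-waiting replaces `wait_event()`/`wake_up_all()`. The default seq_cst ordering of the plain atomic ops is what keeps the flag-versus-reader-count (store-buffering) handshake sound, which is the role the grace period plays in the real code:

```c
/* User-space analogue of the __fd_install() / expand_fdtable() handshake.
 * NOT kernel code: a single-threaded demo of the protocol shape only.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fdtable {
	unsigned int max_fds;
	void **fd;
};

static _Atomic(struct fdtable *) fdt;	/* models files->fdt */
static atomic_bool resize_in_progress;	/* models files->resize_in_progress */
static atomic_int readers;		/* crude stand-in for RCU-sched readers */

/* Reader side, mirroring __fd_install() */
static void install(unsigned int nr, void *file)
{
	atomic_fetch_add(&readers, 1);			/* rcu_read_lock_sched() */
	while (atomic_load(&resize_in_progress)) {	/* resize running: back off */
		atomic_fetch_sub(&readers, 1);
		/* the kernel sleeps here: wait_event(resize_wait, ...) */
		atomic_fetch_add(&readers, 1);
	}
	/* acquire pairs with the writer's release publish, the way
	 * smp_rmb() pairs with smp_wmb() in the patch */
	struct fdtable *t = atomic_load_explicit(&fdt, memory_order_acquire);
	t->fd[nr] = file;
	atomic_fetch_sub(&readers, 1);			/* rcu_read_unlock_sched() */
}

/* Writer side, mirroring expand_fdtable() */
static void expand(unsigned int new_max)
{
	struct fdtable *old, *new_fdt;

	atomic_store(&resize_in_progress, true);
	while (atomic_load(&readers) != 0)	/* poor man's synchronize_sched() */
		;
	old = atomic_load(&fdt);
	new_fdt = malloc(sizeof(*new_fdt));
	new_fdt->max_fds = new_max;
	new_fdt->fd = calloc(new_max, sizeof(void *));
	memcpy(new_fdt->fd, old->fd, old->max_fds * sizeof(void *));
	/* release publish; pairs with the reader's acquire load */
	atomic_store_explicit(&fdt, new_fdt, memory_order_release);
	atomic_store(&resize_in_progress, false);	/* kernel: wake_up_all() */
	/* the old table would be freed via call_rcu() in the kernel;
	 * it is simply leaked in this toy */
}

int main(void)
{
	struct fdtable boot = { .max_fds = 4, .fd = calloc(4, sizeof(void *)) };

	atomic_store(&fdt, &boot);
	install(1, (void *)0x1);
	expand(8);
	install(5, (void *)0x2);
	printf("fd[1]=%p fd[5]=%p\n",
	       atomic_load(&fdt)->fd[1], atomic_load(&fdt)->fd[5]);
	return 0;
}
```

The design point the model makes visible is that all the cost lands on the writer: the patch even skips the grace period when the table cannot be shared (it guards `synchronize_sched()` with `atomic_read(&files->count) > 1`), so single-threaded processes never pay for it, while `__fd_install()` keeps a lockless fast path.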