--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -685,7 +685,10 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
if (ctx->cred != NULL)
put_rpccred(ctx->cred);
dput(ctx->dentry);
- nfs_sb_deactive(sb);
+ if (is_sync)
+ nfs_sb_deactive(sb);
+ else
+ nfs_sb_deactive_async(sb);
kfree(ctx->mdsthreshold);
kfree(ctx);
}
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -351,6 +351,7 @@ extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern void nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
+extern void nfs_sb_deactive_async(struct super_block *sb);
/* namespace.c */
extern char *nfs_path(char **p, struct dentry *dentry,
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2193,7 +2193,7 @@ static void nfs4_free_closedata(void *data)
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);
- nfs_sb_deactive(sb);
+ nfs_sb_deactive_async(sb);
kfree(calldata);
}
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -54,6 +54,7 @@
#include <linux/parser.h>
#include <linux/nsproxy.h>
#include <linux/rcupdate.h>
+#include <linux/kthread.h>
#include <asm/uaccess.h>
@@ -415,6 +416,38 @@ void nfs_sb_deactive(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(nfs_sb_deactive);
+static int nfs_deactivate_super_async(void *ptr)
+{
+ struct super_block *sb = ptr;
+
+ deactivate_super(sb);
+ return 0;
+}
+
+void nfs_sb_deactive_async(struct super_block *sb)
+{
+ struct task_struct *task;
+ char buf[INET6_ADDRSTRLEN + sizeof("-deactivate-super") + 1];
+ struct nfs_server *server = NFS_SB(sb);
+ struct nfs_client *clp = server->nfs_client;
+
+ rcu_read_lock();
+ snprintf(buf, sizeof(buf), "%s-deactivate-super",
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+ rcu_read_unlock();
+
+ if (atomic_dec_and_test(&server->active)) {
+ task = kthread_run(nfs_deactivate_super_async, sb, buf);
+ if (IS_ERR(task)) {
+ pr_err("%s: kthread_run: %ld\n",
+ __func__, PTR_ERR(task));
+ /* make synchronous call and hope for the best */
+ nfs_sb_deactive(sb);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_sb_deactive_async);
+
/*
* Deliver file system statistics to userspace
*/
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -95,7 +95,7 @@ static void nfs_async_unlink_release(void *calldata)
nfs_dec_sillycount(data->dir);
nfs_free_unlinkdata(data);
- nfs_sb_deactive(sb);
+ nfs_sb_deactive_async(sb);
}
static void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
Use nfs_sb_deactive_async instead of nfs_sb_deactive when in a workqueue
context. This avoids a deadlock where rpc_shutdown_client loops forever
in a workqueue kworker context, trying to kill all RPC tasks associated
with the client, while one or more of these tasks have already been
assigned to the same kworker (and will never run rpc_exit_task).

This approach is needed because RPC tasks that have already been
assigned to a kworker by queue_work cannot be canceled, as explained in
the comment for workqueue.c:insert_wq_barrier.

Signed-off-by: Weston Andros Adamson <dros@netapp.com>
---
 fs/nfs/inode.c    |  5 ++++-
 fs/nfs/internal.h |  1 +
 fs/nfs/nfs4proc.c |  2 +-
 fs/nfs/super.c    | 33 +++++++++++++++++++++++++++++++++
 fs/nfs/unlink.c   |  2 +-
 5 files changed, 40 insertions(+), 3 deletions(-)
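To illustrate the dependency described above, here is a minimal, hypothetical
sketch (not part of this patch; all demo_* names are made up) of the same
class of deadlock, made deterministic with an ordered workqueue: a work item
that synchronously waits for another work item queued behind it on the same
single-in-flight workqueue can never make progress, just as
rpc_shutdown_client() waits in a kworker for RPC tasks that were assigned to
that same kworker.

/* deadlock-demo.c: hypothetical illustration only -- do not use as-is. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/printk.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_victim;
static struct work_struct demo_waiter;

/* The work the waiter depends on; it never gets to run. */
static void demo_victim_fn(struct work_struct *work)
{
	pr_info("demo: victim ran\n");
}

/* Occupies demo_wq's single in-flight slot, then waits for demo_victim. */
static void demo_waiter_fn(struct work_struct *work)
{
	queue_work(demo_wq, &demo_victim);
	/*
	 * DEADLOCK: demo_victim is queued behind this work on an ordered
	 * workqueue, so it cannot start until demo_waiter_fn() returns,
	 * and demo_waiter_fn() will not return until demo_victim completes.
	 * rpc_shutdown_client() running in a kworker plays the waiter's
	 * role in the scenario this patch addresses.
	 */
	flush_work(&demo_victim);
	pr_info("demo: never reached\n");
}

static int __init demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo_wq", 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_victim, demo_victim_fn);
	INIT_WORK(&demo_waiter, demo_waiter_fn);
	queue_work(demo_wq, &demo_waiter);	/* wedges one kworker forever */
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");

nfs_sb_deactive_async() sidesteps this dependency by handing the final
deactivate_super() off to a freshly spawned kthread, so the kworker that
dropped the last superblock reference stays free to run the RPC tasks that
the shutdown path is waiting on.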