diff mbox series

[01/11] sunrpc: remove explicit barrier from rpc_make_runnable()

Message ID 20241206021830.3526922-2-neilb@suse.de (mailing list archive)
State New
Headers show
Series nfs: improve use of wake_up_bit and wake_up_var | expand

Commit Message

NeilBrown Dec. 6, 2024, 2:15 a.m. UTC
The wake_up_bit() interface is fragile, as a barrier is often required —
as is the case here.  clear_and_wake_up_bit() is a more robust interface,
as it includes the barrier, and is appropriate here.

This patch rearranges the code slightly and makes use of
clear_and_wake_up_bit().  This removes some of the need to understand
barriers.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 include/linux/sunrpc/sched.h |  2 ++
 net/sunrpc/sched.c           | 13 +++++--------
 2 files changed, 7 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index fec1e8a1570c..76e1c0194376 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -168,6 +168,8 @@  enum {
 #define RPC_IS_QUEUED(t)	test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_set_queued(t)	set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_clear_queued(t)	clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
+#define rpc_clear_and_wake_queued(t)	clear_and_wake_up_bit(RPC_TASK_QUEUED,	\
+							      &(t)->tk_runstate)
 
 #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
 
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef623ea1506..1b710ffc7ad6 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -361,17 +361,14 @@  EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);
 static void rpc_make_runnable(struct workqueue_struct *wq,
 		struct rpc_task *task)
 {
-	bool need_wakeup = !rpc_test_and_set_running(task);
-
-	rpc_clear_queued(task);
-	if (!need_wakeup)
-		return;
-	if (RPC_IS_ASYNC(task)) {
+	if (rpc_test_and_set_running(task))
+		rpc_clear_queued(task);
+	else if (RPC_IS_ASYNC(task)) {
+		rpc_clear_queued(task);
 		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 		queue_work(wq, &task->u.tk_work);
 	} else {
-		smp_mb__after_atomic();
-		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
+		rpc_clear_and_wake_queued(task);
 	}
 }