@@ -168,6 +168,8 @@ enum {
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t) clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
+#define rpc_clear_and_wake_queued(t) clear_and_wake_up_bit(RPC_TASK_QUEUED, \
+ &(t)->tk_runstate)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
@@ -361,17 +361,14 @@ EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);
static void rpc_make_runnable(struct workqueue_struct *wq,
struct rpc_task *task)
{
- bool need_wakeup = !rpc_test_and_set_running(task);
-
- rpc_clear_queued(task);
- if (!need_wakeup)
- return;
- if (RPC_IS_ASYNC(task)) {
+ if (rpc_test_and_set_running(task))
+ rpc_clear_queued(task);
+ else if (RPC_IS_ASYNC(task)) {
+ rpc_clear_queued(task);
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
queue_work(wq, &task->u.tk_work);
} else {
- smp_mb__after_atomic();
- wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
+ rpc_clear_and_wake_queued(task);
}
}
The wake_up_bit() interface is fragile, as a memory barrier is often required alongside it (as is the case here). clear_and_wake_up_bit() is a more robust interface because it includes the necessary barrier, and it is appropriate here. This patch rearranges the code slightly and makes use of clear_and_wake_up_bit(), removing some of the need to reason about barriers. Signed-off-by: NeilBrown <neilb@suse.de> --- include/linux/sunrpc/sched.h | 2 ++ net/sunrpc/sched.c | 13 +++++-------- 2 files changed, 7 insertions(+), 8 deletions(-)