@@ -10,36 +10,46 @@
#include "rxe.h"
-/*
- * this locking is due to a potential race where
- * a second caller finds the task already running
- * but looks just after the last call to func
- */
-static void do_task(struct rxe_task *task)
+static bool task_is_idle(struct rxe_task *task)
{
- unsigned int iterations = RXE_MAX_ITERATIONS;
- int cont;
- int ret;
+ if (task->destroyed)
+ return false;
spin_lock_bh(&task->lock);
switch (task->state) {
case TASK_STATE_START:
task->state = TASK_STATE_BUSY;
spin_unlock_bh(&task->lock);
- break;
-
+ return true;
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
fallthrough;
case TASK_STATE_ARMED:
- spin_unlock_bh(&task->lock);
- return;
-
+ case TASK_STATE_PAUSED:
+ break;
default:
+ WARN_ON(1);
+ break;
+ }
+ spin_unlock_bh(&task->lock);
+
+ return false;
+}
+
+static void do_task(struct rxe_task *task)
+{
+ unsigned int iterations = RXE_MAX_ITERATIONS;
+ bool resched = false;
+ int cont;
+ int ret;
+
+ /* bail out without running the task while it is paused */
+ spin_lock_bh(&task->lock);
+ if (task->state == TASK_STATE_PAUSED) {
spin_unlock_bh(&task->lock);
- pr_warn("%s failed with bad state %d\n", __func__, task->state);
return;
}
+ spin_unlock_bh(&task->lock);
do {
cont = 0;
@@ -47,47 +57,52 @@ static void do_task(struct rxe_task *task)
spin_lock_bh(&task->lock);
switch (task->state) {
+ case TASK_STATE_START:
case TASK_STATE_BUSY:
if (ret) {
task->state = TASK_STATE_START;
- } else if (iterations--) {
+ } else if (task->type == RXE_TASK_TYPE_INLINE ||
+ iterations--) {
cont = 1;
} else {
- /* reschedule the tasklet and exit
- * the loop to give up the cpu
- */
- tasklet_schedule(&task->tasklet);
task->state = TASK_STATE_START;
+ resched = true;
}
break;
-
- /* someone tried to run the task since the last time we called
- * func, so we will call one more time regardless of the
- * return value
- */
case TASK_STATE_ARMED:
task->state = TASK_STATE_BUSY;
cont = 1;
break;
-
+ case TASK_STATE_PAUSED:
+ break;
default:
- pr_warn("%s failed with bad state %d\n", __func__,
- task->state);
+ WARN_ON(1);
+ break;
}
spin_unlock_bh(&task->lock);
} while (cont);
+ if (resched)
+ rxe_sched_task(task);
+
task->ret = ret;
}
static void disable_task(struct rxe_task *task)
{
- /* todo */
+ spin_lock_bh(&task->lock);
+ task->state = TASK_STATE_PAUSED;
+ spin_unlock_bh(&task->lock);
}
static void enable_task(struct rxe_task *task)
{
- /* todo */
+ spin_lock_bh(&task->lock);
+ task->state = TASK_STATE_START;
+ spin_unlock_bh(&task->lock);
+
+ /* run the task in case it was scheduled while paused */
+ rxe_run_task(task);
}
/* busy wait until any previous tasks are done */
@@ -99,7 +114,8 @@ static void cleanup_task(struct rxe_task *task)
do {
spin_lock_bh(&task->lock);
- idle = (task->state == TASK_STATE_START);
+ idle = (task->state == TASK_STATE_START ||
+ task->state == TASK_STATE_PAUSED);
spin_unlock_bh(&task->lock);
} while (!idle);
}
@@ -107,22 +123,26 @@ static void cleanup_task(struct rxe_task *task)
/* silently treat schedule as inline for inline tasks */
static void inline_sched(struct rxe_task *task)
{
- do_task(task);
+ if (task_is_idle(task))
+ do_task(task);
}
static void inline_run(struct rxe_task *task)
{
- do_task(task);
+ if (task_is_idle(task))
+ do_task(task);
}
static void inline_disable(struct rxe_task *task)
{
- disable_task(task);
+ if (!task->destroyed)
+ disable_task(task);
}
static void inline_enable(struct rxe_task *task)
{
- enable_task(task);
+ if (!task->destroyed)
+ enable_task(task);
}
static void inline_cleanup(struct rxe_task *task)
@@ -146,31 +166,34 @@ static void inline_init(struct rxe_task *task)
/* use tsklet_xxx to avoid name collisions with tasklet_xxx */
static void tsklet_sched(struct rxe_task *task)
{
- tasklet_schedule(&task->tasklet);
+ if (task_is_idle(task))
+ tasklet_schedule(&task->tasklet);
}
static void tsklet_do_task(struct tasklet_struct *tasklet)
{
struct rxe_task *task = container_of(tasklet, typeof(*task), tasklet);
- do_task(task);
+ if (!task->destroyed)
+ do_task(task);
}
static void tsklet_run(struct rxe_task *task)
{
- do_task(task);
+ if (task_is_idle(task))
+ do_task(task);
}
static void tsklet_disable(struct rxe_task *task)
{
- disable_task(task);
- tasklet_disable(&task->tasklet);
+ if (!task->destroyed)
+ disable_task(task);
}
static void tsklet_enable(struct rxe_task *task)
{
- tasklet_enable(&task->tasklet);
- enable_task(task);
+ if (!task->destroyed)
+ enable_task(task);
}
static void tsklet_cleanup(struct rxe_task *task)
@@ -26,6 +26,7 @@ enum {
TASK_STATE_START = 0,
TASK_STATE_BUSY = 1,
TASK_STATE_ARMED = 2,
+ TASK_STATE_PAUSED = 3,
};
/*
Implement common disable_task() and enable_task() routines by adding a
new PAUSED state to the do_task() state machine. These replace
tasklet_disable() and tasklet_enable() with code that can be shared
with all the task types. Move the rxe_sched_task() call that
re-schedules the task outside of the locks to avoid a deadlock.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_task.c | 107 ++++++++++++++++-----------
 drivers/infiniband/sw/rxe/rxe_task.h |   1 +
 2 files changed, 66 insertions(+), 42 deletions(-)