@@ -545,8 +545,23 @@ static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
return ret;
}
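+/*
+ * acct->lock protects a shared (public) work list. Callers that operate
+ * on a worker-private copy of the list pass needs_lock == false, which
+ * turns these helpers into no-ops.
+ */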
+static inline void conditional_acct_lock(struct io_wqe_acct *acct,
+ bool needs_lock)
+{
+ if (needs_lock)
+ raw_spin_lock(&acct->lock);
+}
+
+static inline void conditional_acct_unlock(struct io_wqe_acct *acct,
+ bool needs_lock)
+{
+ if (needs_lock)
+ raw_spin_unlock(&acct->lock);
+}
+
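+/*
+ * Pick the next runnable work item, taking acct->lock around the list
+ * walk unless the caller owns @acct outright (needs_lock == false).
+ */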
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
- struct io_worker *worker)
+ struct io_worker *worker,
+ bool needs_lock)
-	__must_hold(acct->lock)
{
struct io_wq_work_node *node, *prev;
@@ -554,6 +569,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
unsigned int stall_hash = -1U;
struct io_wqe *wqe = worker->wqe;
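+	/* Taken here now, rather than by the caller, and only for a shared list */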
+ conditional_acct_lock(acct, needs_lock);
wq_list_for_each(node, prev, &acct->work_list) {
unsigned int hash;
@@ -562,6 +578,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
/* not hashed, can run anytime */
if (!io_wq_is_hashed(work)) {
wq_list_del(&acct->work_list, node, prev);
+ conditional_acct_unlock(acct, needs_lock);
return work;
}
@@ -573,6 +590,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
wqe->hash_tail[hash] = NULL;
wq_list_cut(&acct->work_list, &tail->list, prev);
+ conditional_acct_unlock(acct, needs_lock);
return work;
}
if (stall_hash == -1U)
@@ -589,15 +607,16 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
* work being added and clearing the stalled bit.
*/
set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
- raw_spin_unlock(&acct->lock);
+ conditional_acct_unlock(acct, needs_lock);
unstalled = io_wait_on_hash(wqe, stall_hash);
- raw_spin_lock(&acct->lock);
+ conditional_acct_lock(acct, needs_lock);
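+	/* Both conditional calls above are no-ops for a privately owned list */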
if (unstalled) {
clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
if (wq_has_sleeper(&wqe->wq->hash->wait))
wake_up(&wqe->wq->hash->wait);
}
}
+ conditional_acct_unlock(acct, needs_lock);
return NULL;
}
@@ -631,7 +650,7 @@ static void io_assign_current_work(struct io_worker *worker,
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
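+/*
+ * @needs_lock: false if @acct is a private copy that only this worker
+ * can see, so the work list can be drained without taking acct->lock.
+ */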
static void io_worker_handle_work(struct io_worker *worker,
- struct io_wqe_acct *acct)
+ struct io_wqe_acct *acct, bool needs_lock)
{
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
@@ -647,9 +666,7 @@ static void io_worker_handle_work(struct io_worker *worker,
* can't make progress, any work completion or insertion will
* clear the stalled flag.
*/
- raw_spin_lock(&acct->lock);
- work = io_get_next_work(acct, worker);
- raw_spin_unlock(&acct->lock);
+ work = io_get_next_work(acct, worker, needs_lock);
if (work) {
__io_worker_busy(wqe, worker);
@@ -706,12 +723,19 @@ static void io_worker_handle_work(struct io_worker *worker,
static inline void io_worker_handle_private_work(struct io_worker *worker)
{
- io_worker_handle_work(worker, &worker->acct);
+ struct io_wqe_acct acct;
+
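+	/*
+	 * Snapshot the private list under the lock, reset the original, and
+	 * process the on-stack copy lock-free: nobody else can reach it.
+	 */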
+ raw_spin_lock(&worker->acct.lock);
+ acct = worker->acct;
+ wq_list_clean(&worker->acct.work_list);
+ worker->acct.nr_works = 0;
+ raw_spin_unlock(&worker->acct.lock);
+ io_worker_handle_work(worker, &acct, false);
}
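+/* The public list is shared with other workers and needs acct->lock */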
static inline void io_worker_handle_public_work(struct io_worker *worker)
{
- io_worker_handle_work(worker, io_wqe_get_acct(worker));
+ io_worker_handle_work(worker, io_wqe_get_acct(worker), true);
}
static int io_wqe_worker(void *data)
@@ -32,6 +32,11 @@ enum io_wq_cancel {
(list)->first = NULL; \
} while (0)
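+/* Reset a list to empty; the nodes themselves are left untouched */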
+static inline void wq_list_clean(struct io_wq_work_list *list)
+{
+ list->first = list->last = NULL;
+}
+
static inline void wq_list_add_after(struct io_wq_work_node *node,
struct io_wq_work_node *pos,
struct io_wq_work_list *list)