@@ -1217,7 +1217,7 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
- wake_up_all(&lock->l_waitq);
+ wake_up(&lock->l_waitq);
}
}

@@ -1231,7 +1231,7 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
ldlm_set_lvb_ready(lock);
- wake_up_all(&lock->l_waitq);
+ wake_up(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);

@@ -1752,7 +1752,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
}
/* only canceller can set bl_done bit */
ldlm_set_bl_done(lock);
- wake_up_all(&lock->l_waitq);
+ wake_up(&lock->l_waitq);
} else if (!ldlm_is_bl_done(lock)) {
/*
* The lock is guaranteed to have been canceled once
@@ -1566,7 +1566,7 @@ static void pcc_io_fini(struct inode *inode)

LASSERT(pcci && atomic_read(&pcci->pcci_active_ios) > 0);
if (atomic_dec_and_test(&pcci->pcci_active_ios))
- wake_up_all(&pcci->pcci_waitq);
+ wake_up(&pcci->pcci_waitq);
}

ssize_t pcc_file_read_iter(struct kiocb *iocb,
@@ -651,7 +651,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)

LASSERT(atomic_read(&lov->lo_active_ios) > 0);
if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
+ wake_up(&lov->lo_waitq);
}

static void lov_io_sub_inherit(struct lov_io_sub *sub, struct lov_io *lio,
@@ -1674,7 +1674,7 @@ static void lov_empty_io_fini(const struct lu_env *env,
struct lov_object *lov = cl2lov(ios->cis_obj);

if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
+ wake_up(&lov->lo_waitq);
}

static int lov_empty_io_submit(const struct lu_env *env,
@@ -256,7 +256,7 @@ static int chlg_read_cat_process_cb(const struct lu_env *env,
crs->crs_rec_count++;
mutex_unlock(&crs->crs_lock);

- wake_up_all(&crs->crs_waitq_cons);
+ wake_up(&crs->crs_waitq_cons);

return 0;
}
@@ -347,7 +347,7 @@ static int chlg_load(void *args)
if (rc < 0)
crs->crs_err = rc;

- wake_up_all(&crs->crs_waitq_cons);
+ wake_up(&crs->crs_waitq_cons);

if (llh)
llog_cat_close(NULL, llh);
@@ -420,7 +420,7 @@ static ssize_t chlg_read(struct file *file, char __user *buff, size_t count,

if (written_total > 0) {
rc = written_total;
- wake_up_all(&crs->crs_waitq_prod);
+ wake_up(&crs->crs_waitq_prod);
} else if (rc == 0) {
rc = crs->crs_err;
}
@@ -464,7 +464,7 @@ static int chlg_set_start_offset(struct chlg_reader_state *crs, u64 offset)
}
mutex_unlock(&crs->crs_lock);

- wake_up_all(&crs->crs_waitq_prod);
+ wake_up(&crs->crs_waitq_prod);

return 0;
}
@@ -1187,7 +1187,7 @@ void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
* the wakeup ensures cl_sync_io_wait() doesn't complete
* before the wakeup completes.
*/
- wake_up_all_locked(&anchor->csi_waitq);
+ wake_up_locked(&anchor->csi_waitq);
if (end_io)
end_io(env, anchor);
if (anchor->csi_aio)
@@ -398,7 +398,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
}

if (waitqueue_active(wq))
- wake_up_all(wq);
+ wake_up(wq);
}

/**
@@ -437,7 +437,7 @@ void osc_io_iter_fini(const struct lu_env *env,
oio->oi_is_active = 0;
LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
if (atomic_dec_and_test(&osc->oo_nr_ios))
- wake_up_all(&osc->oo_io_waitq);
+ wake_up(&osc->oo_io_waitq);
}
}
EXPORT_SYMBOL(osc_io_iter_fini);
@@ -679,7 +679,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
atomic_dec(&cli->cl_lru_shrinkers);
if (count > 0) {
atomic_long_add(count, cli->cl_lru_left);
- wake_up_all(&osc_lru_waitq);
+ wake_up(&osc_lru_waitq);
}
return count > 0 ? count : rc;
}
@@ -890,7 +890,7 @@ unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages)
void osc_lru_unreserve(struct client_obd *cli, unsigned long npages)
{
atomic_long_add(npages, cli->cl_lru_left);
- wake_up_all(&osc_lru_waitq);
+ wake_up(&osc_lru_waitq);
}

/**
@@ -987,7 +987,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
&cli->cl_cache->ccc_unstable_nr);
LASSERT(unstable_count >= 0);
if (!unstable_count)
- wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+ wake_up(&cli->cl_cache->ccc_unstable_waitq);

if (waitqueue_active(&osc_lru_waitq))
(void)ptlrpcd_queue_work(cli->cl_lru_work);
@@ -304,7 +304,7 @@ static inline void enc_pools_wakeup(void)

if (unlikely(page_pools.epp_waitqlen)) {
LASSERT(waitqueue_active(&page_pools.epp_waitq));
- wake_up_all(&page_pools.epp_waitq);
+ wake_up(&page_pools.epp_waitq);
}
}

@@ -2465,7 +2465,7 @@ static void ptlrpc_stop_hr_threads(void)
if (!hrp->hrp_thrs)
continue; /* uninitialized */
for (j = 0; j < hrp->hrp_nthrs; j++)
- wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
+ wake_up(&hrp->hrp_thrs[j].hrt_waitq);
}

cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
@@ -2520,8 +2520,8 @@ static void kiblnd_base_shutdown(void)
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
wake_up_all(&sched->ibs_waitq);

- wake_up_all(&kiblnd_data.kib_connd_waitq);
- wake_up_all(&kiblnd_data.kib_failover_waitq);
+ wake_up(&kiblnd_data.kib_connd_waitq);
+ wake_up(&kiblnd_data.kib_failover_waitq);

wait_var_event_warning(&kiblnd_data.kib_nthreads,
!atomic_read(&kiblnd_data.kib_nthreads),
@@ -1799,12 +1799,12 @@ static int ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
/* flag threads to terminate; wake and wait for them to die */
ksocknal_data.ksnd_shuttingdown = 1;
wake_up_all(&ksocknal_data.ksnd_connd_waitq);
- wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);

if (ksocknal_data.ksnd_schedulers) {
cfs_percpt_for_each(sched, i,
ksocknal_data.ksnd_schedulers)
wake_up_all(&sched->kss_waitq);
}

wait_var_event_warning(&ksocknal_data.ksnd_nthreads,
@@ -2131,7 +2131,7 @@ static void lnet_peer_discovery_complete(struct lnet_peer *lp)
spin_lock(&lp->lp_lock);
list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
spin_unlock(&lp->lp_lock);
- wake_up_all(&lp->lp_dc_waitq);
+ wake_up(&lp->lp_dc_waitq);

if (lp->lp_rtr_refcount > 0)
lnet_router_discovery_complete(lp);
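
For context, the semantics that make these conversions safe: wake_up() and wake_up_all() differ only for waiters queued with the _exclusive variants. Every ordinary (non-exclusive) sleeper is woken either way, so on a queue that never has exclusive waiters, typically a single thread draining an in-flight counter, wake_up() is the lighter, equivalent call. Queues that several threads may wait on exclusively, such as the per-CPT scheduler queues left untouched above, keep wake_up_all(). A minimal sketch of the recurring pattern, not taken from the patch (the demo_* names are hypothetical):

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t demo_active_ios = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

/* I/O completion path: drop the counter and poke the (sole) waiter. */
static void demo_io_fini(void)
{
	if (atomic_dec_and_test(&demo_active_ios))
		wake_up(&demo_waitq);	/* no exclusive waiters, so same effect as wake_up_all() */
}

/* Teardown path: the only thread that ever sleeps on demo_waitq. */
static void demo_drain_ios(void)
{
	wait_event(demo_waitq, atomic_read(&demo_active_ios) == 0);
}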