@@ -55,7 +55,6 @@
 #define SOCKNAL_NSCHEDS_HIGH	(SOCKNAL_NSCHEDS << 1)
 
 #define SOCKNAL_PEER_HASH_BITS	7	/* # log2 of # of peer_ni lists */
-#define SOCKNAL_RESCHED		100	/* # scheduler loops before reschedule */
 #define SOCKNAL_INSANITY_RECONN	5000	/* connd is trying on reconn infinitely */
 #define SOCKNAL_ENOMEM_RETRY	1	/* seconds between retries */
 
@@ -1328,7 +1328,6 @@ int ksocknal_scheduler(void *arg)
 	struct ksock_conn *conn;
 	struct ksock_tx *tx;
 	int rc;
-	int nloops = 0;
 	long id = (long)arg;
 
 	sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];
@@ -1470,12 +1469,10 @@ int ksocknal_scheduler(void *arg)
 			did_something = 1;
 		}
 
-		if (!did_something ||	/* nothing to do */
-		    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+		if (!did_something ||	/* nothing to do */
+		    need_resched()) {	/* hogging CPU? */
 			spin_unlock_bh(&sched->kss_lock);
 
-			nloops = 0;
-
 			if (!did_something) {	/* wait for something to do */
 				rc = wait_event_interruptible_exclusive(
 					sched->kss_waitq,
@@ -2080,7 +2077,6 @@ int ksocknal_connd(void *arg)
 	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
 	struct ksock_connreq *cr;
 	wait_queue_entry_t wait;
-	int nloops = 0;
 	int cons_retry = 0;
 
 	init_waitqueue_entry(&wait, current);
@@ -2158,10 +2154,9 @@ int ksocknal_connd(void *arg)
 		}
 
 		if (dropped_lock) {
-			if (++nloops < SOCKNAL_RESCHED)
+			if (!need_resched())
 				continue;
 			spin_unlock_bh(connd_lock);
-			nloops = 0;
 			cond_resched();
 			spin_lock_bh(connd_lock);
 			continue;
@@ -2173,7 +2168,6 @@ int ksocknal_connd(void *arg)
 					 &wait);
 		spin_unlock_bh(connd_lock);
 
-		nloops = 0;
 		schedule_timeout(timeout);
 
 		remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);