@@ -54,16 +54,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
if (state == DCCP_TIME_WAIT)
timeo = DCCP_TIMEWAIT_LEN;
- /* tw_timer is pinned, so we need to make sure BH are disabled
- * in following section, otherwise timer handler could run before
- * we complete the initialization.
- */
- local_bh_disable();
/* Linkage updates.
* Note that access to tw after this point is illegal.
*/
inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
- local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
@@ -93,7 +93,7 @@ static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
}
/*
- * Enter the time wait state. This is called with locally disabled BH.
+ * Enter the time wait state.
* Essentially we whip up a timewait bucket, copy the relevant info into it
* from the SK, and mess with hash chains and list linkage.
*
@@ -118,6 +118,7 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
hashinfo->bhash_size)];
bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);
+ local_bh_disable();
spin_lock(&bhead->lock);
spin_lock(&bhead2->lock);
@@ -158,6 +159,7 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
inet_twsk_schedule(tw, timeo);
spin_unlock(lock);
+ local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_twsk_hashdance_schedule);
@@ -203,7 +205,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_prot = sk->sk_prot_creator;
atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
twsk_net_set(tw, sock_net(sk));
- timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
+ timer_setup(&tw->tw_timer, tw_timer_handler, 0);
/*
* Because we use RCU lookups, we should not set tw_refcnt
* to a non null value before everything is setup for this
@@ -345,16 +345,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
- /* tw_timer is pinned, so we need to make sure BH are disabled
- * in following section, otherwise timer handler could run before
- * we complete the initialization.
- */
- local_bh_disable();
/* Linkage updates.
* Note that access to tw after this point is illegal.
*/
inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
- local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
After the previous patch, even if the timer fires immediately on another CPU, the
context that schedules the timer now holds the ehash spinlock, so the timer cannot
reap the tw socket until the ehash lock is released.

The BH disable/enable pair is moved into inet_twsk_hashdance_schedule().

Signed-off-by: Florian Westphal <fw@strlen.de>
---
Changes since last version:
 Move local_bh_disable/enable into inet_twsk_hashdance_schedule and remove
 the comments mentioning the pinned timer in the two callers.

 net/dccp/minisocks.c          | 6 ------
 net/ipv4/inet_timewait_sock.c | 6 ++++--
 net/ipv4/tcp_minisocks.c      | 6 ------
 3 files changed, 4 insertions(+), 14 deletions(-)
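For reviewers who want to convince themselves of the ordering argument, below is a
rough userspace model, not kernel code: struct tw_model, timer_fn and the pthread
mutex are stand-ins I made up for the tw socket, tw_timer_handler and the ehash
bucket spinlock. The "timer" thread is started while the arming context still holds
the lock, so it cannot reap the object before initialization finishes, which is the
same reasoning the changelog relies on.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical model of the timewait socket and its ehash bucket lock. */
struct tw_model {
	pthread_mutex_t ehash_lock;	/* stands in for the ehash bucket spinlock */
	int refcnt;			/* stands in for tw_refcnt */
};

/* Plays the role of the tw_timer handler reaping the timewait socket. */
static void *timer_fn(void *arg)
{
	struct tw_model *tw = arg;

	/* The handler serializes on the same lock the arming context holds,
	 * so it can only observe fully initialized state.
	 */
	pthread_mutex_lock(&tw->ehash_lock);
	printf("timer: refcnt=%d, safe to reap\n", tw->refcnt);
	pthread_mutex_unlock(&tw->ehash_lock);
	return NULL;
}

int main(void)
{
	struct tw_model tw = { .refcnt = 0 };
	pthread_t timer;

	pthread_mutex_init(&tw.ehash_lock, NULL);

	/* "hashdance_schedule": lock, link, arm the timer, finish the
	 * initialization, then unlock.
	 */
	pthread_mutex_lock(&tw.ehash_lock);
	pthread_create(&timer, NULL, timer_fn, &tw);	/* timer may "fire" now */
	usleep(1000);			/* give the handler a chance to run early */
	tw.refcnt = 3;			/* initialization completes under the lock */
	pthread_mutex_unlock(&tw.ehash_lock);	/* only now can the handler proceed */

	pthread_join(timer, NULL);
	pthread_mutex_destroy(&tw.ehash_lock);
	return 0;
}

Built with "cc -pthread", the printf always reports refcnt=3, no matter how quickly
the handler thread starts.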