@@ -3089,7 +3089,7 @@ int netif_rx(struct sk_buff *skb)
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
-		preempt_disable();
+		get_online_cpus_atomic();
 		rcu_read_lock();
 
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -3099,7 +3099,7 @@ int netif_rx(struct sk_buff *skb)
 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
 		rcu_read_unlock();
-		preempt_enable();
+		put_online_cpus_atomic();
 	} else
 #endif
 	{
@@ -3498,6 +3498,7 @@ int netif_receive_skb(struct sk_buff *skb)
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
+		get_online_cpus_atomic();
 		rcu_read_lock();
 
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -3505,9 +3506,11 @@ int netif_receive_skb(struct sk_buff *skb)
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
+			put_online_cpus_atomic();
 			return ret;
 		}
 		rcu_read_unlock();
+		put_online_cpus_atomic();
 	}
 #endif
 	return __netif_receive_skb(skb);
@@ -3887,6 +3890,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 		local_irq_enable();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
+		get_online_cpus_atomic();
 		while (remsd) {
 			struct softnet_data *next = remsd->rps_ipi_next;
 
@@ -3895,6 +3899,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 							   &remsd->csd, 0);
 			remsd = next;
 		}
+		put_online_cpus_atomic();
 	} else
 #endif
 		local_irq_enable();
Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on preempt_disable() or local_irq_disable() to prevent CPUs
from going offline from under us. Use the get/put_online_cpus_atomic()
APIs to prevent CPUs from going offline while we are in atomic context.

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: netdev@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 net/core/dev.c |    9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
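
For reference, the pattern these hunks apply can be distilled into a
minimal sketch. It assumes the get/put_online_cpus_atomic() API
introduced earlier in this series (it is not part of mainline); the
demo_* functions are hypothetical, for illustration only.

#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Hypothetical IPI handler; runs on the target CPU. */
static void demo_ipi_handler(void *info)
{
	pr_debug("demo: running on CPU %d\n", smp_processor_id());
}

/* Kick a remote CPU without racing against CPU offline. */
static void demo_kick_remote_cpu(int cpu)
{
	/*
	 * Synchronize with CPU hotplug from atomic context. Unlike
	 * preempt_disable(), this remains a valid hotplug barrier
	 * once stop_machine() is removed from the offline path.
	 */
	get_online_cpus_atomic();

	/* 'cpu' cannot go offline between this check and the IPI. */
	if (cpu_online(cpu))
		smp_call_function_single(cpu, demo_ipi_handler, NULL, 0);

	put_online_cpus_atomic();
}

Note that every exit path must drop the reference, which is why the
netif_receive_skb() hunk adds put_online_cpus_atomic() both before the
early "return ret;" and at the end of the RPS block.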