@@ -1144,7 +1144,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
static void __blk_mq_complete_request_remote(void *data)
{
- __raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ raise_softirq_no_wake(BLOCK_SOFTIRQ);
}
static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -604,7 +604,7 @@ static inline void do_softirq_post_smp_call_flush(unsigned int unused)
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-extern void __raise_softirq_irqoff(unsigned int nr);
+extern void raise_softirq_no_wake(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -664,7 +664,7 @@ void irq_exit(void)
*/
inline void raise_softirq_irqoff(unsigned int nr)
{
- __raise_softirq_irqoff(nr);
+ raise_softirq_no_wake(nr);
/*
* If we're in an interrupt or softirq, we're done
@@ -688,7 +688,7 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}
-void __raise_softirq_irqoff(unsigned int nr)
+void raise_softirq_no_wake(unsigned int nr)
{
lockdep_assert_irqs_disabled();
trace_softirq_raise(nr);
@@ -795,7 +795,7 @@ static void tasklet_action_common(struct softirq_action *a,
t->next = NULL;
*tl_head->tail = t;
tl_head->tail = &t->next;
- __raise_softirq_irqoff(softirq_nr);
+ raise_softirq_no_wake(softirq_nr);
local_irq_enable();
}
}
@@ -130,7 +130,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
}
if (rearm)
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ raise_softirq_no_wake(IRQ_POLL_SOFTIRQ);
local_irq_enable();
}
@@ -197,7 +197,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ raise_softirq_no_wake(IRQ_POLL_SOFTIRQ);
local_irq_enable();
local_bh_enable();
@@ -4459,7 +4459,7 @@ static inline void ____napi_schedule(struct softnet_data *sd,
* we have to raise NET_RX_SOFTIRQ.
*/
if (!sd->in_net_rx_action)
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ raise_softirq_no_wake(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS
@@ -4678,7 +4678,7 @@ static void trigger_rx_softirq(void *data)
{
struct softnet_data *sd = data;
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ raise_softirq_no_wake(NET_RX_SOFTIRQ);
smp_store_release(&sd->defer_ipi_scheduled, 0);
}
@@ -4705,7 +4705,7 @@ static void napi_schedule_rps(struct softnet_data *sd)
* we have to raise NET_RX_SOFTIRQ.
*/
if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ raise_softirq_no_wake(NET_RX_SOFTIRQ);
return;
}
#endif /* CONFIG_RPS */
@@ -6743,7 +6743,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ raise_softirq_no_wake(NET_RX_SOFTIRQ);
else
sd->in_net_rx_action = false;
This makes the purpose of this function clearer.

Fixes: cff9b2332ab7 ("kernel/sched: Modify initial boot task idle setup")
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 block/blk-mq.c            | 2 +-
 include/linux/interrupt.h | 2 +-
 kernel/softirq.c          | 6 +++---
 lib/irq_poll.c            | 4 ++--
 net/core/dev.c            | 8 ++++----
 5 files changed, 11 insertions(+), 11 deletions(-)