@@ -112,6 +112,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	unsigned long cpu;
 	unsigned long retval;
 
+	get_online_cpus_atomic();
 	local_irq_save(flags);
 
 	for_each_cpu(cpu, cpumask) {
@@ -128,6 +129,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	}
 
 	local_irq_restore(flags);
+	put_online_cpus_atomic();
 }
 
 static struct irqaction ipi_intdesc = {
@@ -241,9 +243,12 @@ void smp_send_reschedule(int cpu)
 void smp_send_stop(void)
 {
 	struct cpumask targets;
+
+	get_online_cpus_atomic();
 	cpumask_copy(&targets, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &targets);
 	send_ipi(&targets, IPI_CPU_STOP);
+	put_online_cpus_atomic();
 }
 
 void arch_send_call_function_single_ipi(int cpu)
Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on preempt_disable() or local_irq_disable() to prevent CPUs
from going offline from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going
offline, while invoking from atomic context.

Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: linux-hexagon@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 arch/hexagon/kernel/smp.c |    5 +++++
 1 file changed, 5 insertions(+)
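
As a minimal sketch of the pattern applied above (not part of this
patch), assuming the get/put_online_cpus_atomic() APIs introduced
earlier in this series; example_signal_cpus() is a hypothetical
function, not code from arch/hexagon/kernel/smp.c:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static void example_signal_cpus(void)
{
	unsigned int cpu;

	/*
	 * Pin the online-CPU set: once stop_machine() is gone from
	 * the offline path, preempt_disable()/local_irq_disable()
	 * alone no longer prevents a CPU from disappearing here.
	 */
	get_online_cpus_atomic();

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/* 'cpu' is guaranteed to stay online in this window */
	}

	put_online_cpus_atomic();	/* CPU offline can proceed again */
}

The same bracketing appears in each hunk of the patch: every read of
cpu_online_mask and every IPI sent to another CPU falls between
get_online_cpus_atomic() and put_online_cpus_atomic().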