@@ -688,12 +688,12 @@ int on_each_cpu(void (*func) (void *info), void *info, int wait)
unsigned long flags;
int ret = 0;

- preempt_disable();
+ get_online_cpus_atomic();
ret = smp_call_function(func, info, wait);
local_irq_save(flags);
func(info);
local_irq_restore(flags);
- preempt_enable();
+ put_online_cpus_atomic();
return ret;
}
EXPORT_SYMBOL(on_each_cpu);
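
(For readers picking up this patch on its own: get/put_online_cpus_atomic()
are introduced earlier in this series. Conceptually, the reader side behaves
like preempt_disable()/preempt_enable() plus a guarantee that no CPU can
complete an offline transition while the critical section is held. The sketch
below is a deliberately naive illustration of those semantics using a global
rwlock; it is NOT the implementation from this series, which uses a more
scalable percpu reader-writer scheme.)

#include <linux/preempt.h>
#include <linux/spinlock.h>

/* Hypothetical sketch only, not the actual implementation */
static DEFINE_RWLOCK(hotplug_rwlock);

void get_online_cpus_atomic(void)
{
	preempt_disable();		/* pin ourselves to this CPU */
	read_lock(&hotplug_rwlock);	/* hold off the offline writer */
}

void put_online_cpus_atomic(void)
{
	read_unlock(&hotplug_rwlock);
	preempt_enable();
}
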
@@ -715,7 +715,11 @@ EXPORT_SYMBOL(on_each_cpu);
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
void *info, bool wait)
{
- int cpu = get_cpu();
+ int cpu;
+
+ get_online_cpus_atomic();
+
+ cpu = smp_processor_id();

smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(cpu, mask)) {
@@ -723,7 +727,7 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
func(info);
local_irq_enable();
}
- put_cpu();
+ put_online_cpus_atomic();
}
EXPORT_SYMBOL(on_each_cpu_mask);

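Note that the old get_cpu()/put_cpu() pair did two jobs at once: it disabled
preemption and returned the current CPU id. Calling a raw smp_processor_id()
here is only safe because get_online_cpus_atomic() is itself expected to
disable preemption (otherwise the CPU id could become stale immediately).
Pieced together from the two hunks above, the function reads like this after
the patch (comments added here for illustration):

void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu;

	get_online_cpus_atomic();	/* also disables preemption */

	cpu = smp_processor_id();	/* safe: we cannot migrate now */

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		/* Run the callback on the local CPU as well */
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_online_cpus_atomic();
}
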
@@ -748,8 +752,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
* The function might sleep if the GFP flags indicates a non
* atomic allocation is allowed.
*
- * Preemption is disabled to protect against CPUs going offline but not online.
- * CPUs going online during the call will not be seen or sent an IPI.
+ * We use get/put_online_cpus_atomic() to prevent CPUs from going
+ * offline in the middle of our operation. CPUs coming online during
+ * the call will not be seen or sent an IPI.
*
* You must not call this function with disabled interrupts or
* from a hardware interrupt handler or from a bottom half handler.
@@ -764,26 +769,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
might_sleep_if(gfp_flags & __GFP_WAIT);

if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
- preempt_disable();
+ get_online_cpus_atomic();
for_each_online_cpu(cpu)
if (cond_func(cpu, info))
cpumask_set_cpu(cpu, cpus);
on_each_cpu_mask(cpus, func, info, wait);
- preempt_enable();
+ put_online_cpus_atomic();
free_cpumask_var(cpus);
} else {
/*
* No free cpumask, bother. No matter, we'll
* just have to IPI them one by one.
*/
- preempt_disable();
+ get_online_cpus_atomic();
for_each_online_cpu(cpu)
if (cond_func(cpu, info)) {
ret = smp_call_function_single(cpu, func,
info, wait);
WARN_ON_ONCE(!ret);
}
- preempt_enable();
+ put_online_cpus_atomic();
}
}
EXPORT_SYMBOL(on_each_cpu_cond);
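
For context, here is a hypothetical caller of the converted API; every name
below except on_each_cpu_cond() itself is invented for illustration. The
predicate runs on the calling CPU once per online CPU, and only the CPUs it
approves are sent the IPI:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Hypothetical example, not existing kernel code */
struct flush_info {
	struct cpumask dirty_cpus;	/* CPUs that need a flush */
};

static bool cpu_needs_flush(int cpu, void *info)
{
	struct flush_info *fi = info;

	/* Runs on the calling CPU, once for each online CPU */
	return cpumask_test_cpu(cpu, &fi->dirty_cpus);
}

static void flush_local_cache(void *info)
{
	/* Runs on each selected CPU, with interrupts disabled */
}

static void flush_dirty_cpus(struct flush_info *fi)
{
	/* May sleep: the internal cpumask allocation uses GFP_KERNEL */
	on_each_cpu_cond(cpu_needs_flush, flush_local_cache, fi,
			 true, GFP_KERNEL);
}
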

Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on preempt_disable() to prevent CPUs from going offline from
under us. Use the get/put_online_cpus_atomic() APIs to prevent CPUs
from going offline while these functions are invoked from atomic
context.

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 kernel/smp.c |   25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)
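
To restate the conversion pattern schematically for other atomic-context
callers in the tree (a sketch, not a quote from this patch):

	/* Before: implicitly relies on stop_machine() in the offline path */
	preempt_disable();
	/* ... send IPIs / touch per-cpu state of other CPUs ... */
	preempt_enable();

	/* After: explicit, stop_machine()-free hotplug protection */
	get_online_cpus_atomic();
	/* ... same work; CPUs can no longer vanish under us ... */
	put_online_cpus_atomic();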