@@ -357,7 +357,7 @@ static void flush_tlb_mm_ipi(void *mm)
  */
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	preempt_disable();
+	get_online_cpus_atomic();
 
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
@@ -369,7 +369,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	}
 	local_flush_tlb_mm(mm);
 
-	preempt_enable();
+	put_online_cpus_atomic();
 }
 
 struct flush_tlb_data {
@@ -390,7 +390,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 
-	preempt_disable();
+	get_online_cpus_atomic();
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd;
 
@@ -405,7 +405,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 				cpu_context(i, mm) = 0;
 	}
 	local_flush_tlb_range(vma, start, end);
-	preempt_enable();
+	put_online_cpus_atomic();
 }
 
 static void flush_tlb_kernel_range_ipi(void *info)
@@ -433,7 +433,7 @@ static void flush_tlb_page_ipi(void *info)
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	preempt_disable();
+	get_online_cpus_atomic();
 	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
 	    (current->mm != vma->vm_mm)) {
 		struct flush_tlb_data fd;
@@ -448,7 +448,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 				cpu_context(i, vma->vm_mm) = 0;
 	}
 	local_flush_tlb_page(vma, page);
-	preempt_enable();
+	put_online_cpus_atomic();
 }
 
 static void flush_tlb_one_ipi(void *info)
Once stop_machine() is gone from the CPU offline path, we won't be able to
depend on preempt_disable() or local_irq_disable() to prevent CPUs from
going offline from under us. Use the get/put_online_cpus_atomic() APIs to
prevent CPUs from going offline while we are in atomic context.

Cc: Paul Mundt <lethal@linux-sh.org>
Cc: linux-sh@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 arch/sh/kernel/smp.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
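The conversion pattern used throughout this patch is sketched below, assuming
the get/put_online_cpus_atomic() APIs introduced earlier in this series (in
this series they disable preemption internally, so they can directly replace
the preempt_disable()/preempt_enable() pairs around the cross-CPU flushes).
The function names in the sketch are hypothetical, not taken from
arch/sh/kernel/smp.c:

#include <linux/cpu.h>	/* get/put_online_cpus_atomic(), from this series */
#include <linux/smp.h>	/* smp_call_function() */

/* Hypothetical IPI handler, for illustration only. */
static void frob_ipi(void *info)
{
	/* Runs on every other online CPU. */
}

static void frob_all_cpus(void)
{
	/*
	 * preempt_disable() used to keep smp_call_function()'s view of
	 * the online CPUs stable only because CPU offline went through
	 * stop_machine(). With that gone, take the atomic reader-side
	 * hotplug protection instead, which also disables preemption.
	 */
	get_online_cpus_atomic();
	smp_call_function(frob_ipi, NULL, 1);	/* wait == 1 */
	put_online_cpus_atomic();
}

The same reasoning covers the single-threaded paths above: they clear
cpu_context() for each of the other online CPUs, so they too rely on the
set of online CPUs staying stable until the local flush completes.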