mm/memory.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
=================================================================
@@ -899,9 +899,10 @@ unsigned long unmap_vmas(struct mmu_gath
unsigned long start = start_addr;
spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
int fullmm = (*tlbp)->fullmm;
- struct mm_struct *mm = vma->vm_mm;
+ struct mm_struct *mm = vma ? vma->vm_mm : NULL;
- mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
+ if (mm)
+ mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
unsigned long end;
@@ -966,7 +967,8 @@ unsigned long unmap_vmas(struct mmu_gath
}
}
out:
- mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
+ if (mm)
+ mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
return start; /* which is now the end (or restart) address */
}
This NULL-vma dereference in unmap_vmas() cropped up in stress testing of a backport of the mmu notifier mechanism; however, the issue still exists in 2.6.28.8. Patch attached. Signed-off-by: john.cooper@redhat.com