--- a/mm/internal.h
+++ b/mm/internal.h
@@ -291,6 +291,7 @@ static inline bool is_data_mapping(vm_fl
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node *rb_parent);
+void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -686,18 +686,8 @@ static __always_inline void __vma_unlink
struct vm_area_struct *vma,
struct vm_area_struct *ignore)
{
- struct vm_area_struct *prev, *next;
-
vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
- next = vma->vm_next;
- prev = vma->vm_prev;
- if (prev)
- prev->vm_next = next;
- else
- mm->mmap = next;
- if (next)
- next->vm_prev = prev;
-
+ __vma_unlink_list(mm, vma);
/* Kill the cache */
vmacache_invalidate(mm);
}
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -684,13 +684,7 @@ static void delete_vma_from_mm(struct vm

/* remove from the MM's tree and list */
rb_erase(&vma->vm_rb, &mm->mm_rb);
- if (vma->vm_prev)
- vma->vm_prev->vm_next = vma->vm_next;
- else
- mm->mmap = vma->vm_next;
-
- if (vma->vm_next)
- vma->vm_next->vm_prev = vma->vm_prev;
+ __vma_unlink_list(mm, vma);
}

/*
--- a/mm/util.c
+++ b/mm/util.c
@@ -292,6 +292,20 @@ void __vma_link_list(struct mm_struct *m
next->vm_prev = vma;
}

+void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ struct vm_area_struct *prev, *next;
+
+ next = vma->vm_next;
+ prev = vma->vm_prev;
+ if (prev)
+ prev->vm_next = next;
+ else
+ mm->mmap = next;
+ if (next)
+ next->vm_prev = prev;
+}
+
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
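
The helper factored out above is ordinary doubly-linked-list surgery on the mm's sorted VMA list, with mm->mmap acting as the head pointer. For readers who want to exercise the logic outside the kernel tree, here is a minimal userspace sketch; the names demo_mm, demo_vma and demo_unlink_list are illustrative stand-ins, not kernel API, and the structs carry only the fields the unlink needs.

/*
 * Userspace sketch only: demo_mm/demo_vma/demo_unlink_list are
 * illustrative stand-ins, not the kernel's mm_struct/vm_area_struct.
 */
#include <stdio.h>

struct demo_vma {
        unsigned long vm_start;
        struct demo_vma *vm_next;       /* next VMA in address order */
        struct demo_vma *vm_prev;       /* previous VMA, NULL for the first */
};

struct demo_mm {
        struct demo_vma *mmap;          /* head of the VMA list */
};

/* Same pointer updates that __vma_unlink_list() centralises. */
static void demo_unlink_list(struct demo_mm *mm, struct demo_vma *vma)
{
        struct demo_vma *prev = vma->vm_prev;
        struct demo_vma *next = vma->vm_next;

        if (prev)
                prev->vm_next = next;
        else
                mm->mmap = next;        /* removing the first entry moves the head */
        if (next)
                next->vm_prev = prev;
}

int main(void)
{
        struct demo_vma a = { .vm_start = 0x1000 };
        struct demo_vma b = { .vm_start = 0x2000 };
        struct demo_vma c = { .vm_start = 0x3000 };
        struct demo_mm mm = { .mmap = &a };
        struct demo_vma *v;

        /* build a <-> b <-> c */
        a.vm_next = &b; b.vm_prev = &a;
        b.vm_next = &c; c.vm_prev = &b;

        demo_unlink_list(&mm, &b);      /* unlink a middle entry */
        demo_unlink_list(&mm, &a);      /* unlink the head; mm.mmap becomes &c */

        for (v = mm.mmap; v; v = v->vm_next)
                printf("vma at 0x%lx\n", v->vm_start); /* prints only 0x3000 */

        return 0;
}

The only subtlety the helper hides is the head update: when the first VMA is removed, mm->mmap must be advanced to the next entry, which both the mm/mmap.c and mm/nommu.c call sites previously open-coded.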