@@ -2478,6 +2478,25 @@ static int ksm_enter(struct mm_struct *mm, struct vm_area_struct *vma,
return 0;
}
+static int ksm_leave(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long *vm_flags)
+{
+ int err;
+
+ if (!(*vm_flags & VM_MERGEABLE))
+ return 0; /* just ignore the advice */
+
+ if (vma->anon_vma) {
+ err = unmerge_ksm_pages(vma, start, end);
+ if (err)
+ return err;
+ }
+
+ *vm_flags &= ~VM_MERGEABLE;
+
+ return 0;
+}
+
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
{
@@ -2492,16 +2511,9 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
break;
case MADV_UNMERGEABLE:
- if (!(*vm_flags & VM_MERGEABLE))
- return 0; /* just ignore the advice */
-
- if (vma->anon_vma) {
- err = unmerge_ksm_pages(vma, start, end);
- if (err)
- return err;
- }
-
- *vm_flags &= ~VM_MERGEABLE;
+ err = ksm_leave(vma, start, end, vm_flags);
+ if (err)
+ return err;
break;
}
Move the MADV_UNMERGEABLE part of ksm_madvise() into a dedicated helper,
since it will later be used to unmerge VMAs forcibly.

No functional changes intended.

Signed-off-by: Oleksandr Natalenko <oleksandr@redhat.com>
---
 mm/ksm.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)
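
For illustration only, not part of this patch: the kind of forced-unmerge
caller the new helper enables would walk every VMA of an mm and clear
VM_MERGEABLE via ksm_leave(). A minimal sketch, assuming a pre-maple-tree
kernel where VMAs are reachable through mm->mmap/vm_next and
mmap_write_lock() exists (v5.8+); unmerge_all_vmas() is a hypothetical
name, not something introduced by this series:

/*
 * Hypothetical sketch: forcibly unmerge all mergeable VMAs of @mm.
 * Takes the mmap write lock because ksm_leave() modifies vm_flags.
 * Stops at the first error (e.g. a pending signal seen by
 * unmerge_ksm_pages()) and reports it to the caller.
 */
static int unmerge_all_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		err = ksm_leave(vma, vma->vm_start, vma->vm_end,
				&vma->vm_flags);
		if (err)
			break;
	}
	mmap_write_unlock(mm);

	return err;
}

The existing user-visible path is unchanged: madvise(addr, len,
MADV_UNMERGEABLE) still reaches the same logic through ksm_madvise().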