@@ -1063,6 +1063,7 @@ static int unmerge_and_remove_all_rmap_items(void)
mm_slot_free(mm_slot_cache, mm_slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
mmdrop(mm);
} else
spin_unlock(&ksm_mmlist_lock);
@@ -2329,6 +2330,17 @@ static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
return rmap_item;
}
+static bool vma_ksm_mergeable(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_MERGEABLE)
+ return true;
+
+ if (test_bit(MMF_VM_MERGE_ANY, &vma->vm_mm->flags))
+ return true;
+
+ return false;
+}
+
static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
{
struct mm_struct *mm;
@@ -2405,8 +2417,20 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
goto no_vmas;
for_each_vma(vmi, vma) {
- if (!(vma->vm_flags & VM_MERGEABLE))
+ if (!vma_ksm_mergeable(vma))
continue;
+ if (!(vma->vm_flags & VM_MERGEABLE)) {
+ unsigned long flags = vma->vm_flags;
+
+ /* madvise failed, use next vma */
+ if (ksm_madvise(vma, vma->vm_start, vma->vm_end, MADV_MERGEABLE, &flags))
+ continue;
+ /* vma is not supported as mergeable, use next vma */
+ if (!(flags & VM_MERGEABLE))
+ continue;
+
+ vm_flags_set(vma, VM_MERGEABLE);
+ }
if (ksm_scan.address < vma->vm_start)
ksm_scan.address = vma->vm_start;
if (!vma->anon_vma)
@@ -2491,6 +2515,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
mm_slot_free(mm_slot_cache, mm_slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
mmap_read_unlock(mm);
mmdrop(mm);
} else {
If the new flag MMF_VM_MERGE_ANY has been set for a process, iterate over all of its vmas and enable ksm on each vma where possible. For vmas that can be ksm-enabled, this is done only once. Signed-off-by: Stefan Roesch <shr@devkernel.io> --- mm/ksm.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-)