
[v2,11/15] mm/mmap: Track start and end of munmap in vma_munmap_struct

Message ID 20240625191145.3382793-12-Liam.Howlett@oracle.com (mailing list archive)
State New
Series Avoid MAP_FIXED gap exposure

Commit Message

Liam R. Howlett June 25, 2024, 7:11 p.m. UTC
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Set the start and end addresses for the munmap range when prev and next
are gathered.  This is needed to avoid incorrect addresses being used in
vms_complete_munmap_vmas() if the prev/next VMAs are expanded.
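
As an illustration only (record_unmap_range() is a hypothetical helper,
not part of this patch), the idea is to capture the page table free
range while prev/next still describe the old layout, so a later
expansion of prev/next over the gap cannot shift it:

	/*
	 * Sketch only: record the range at gather time, with the same
	 * fallbacks that init_vma_munmap() uses when prev/next are
	 * absent.
	 */
	static void record_unmap_range(struct vma_munmap_struct *vms)
	{
		vms->unmap_start = vms->prev ? vms->prev->vm_end
					     : FIRST_USER_ADDRESS;
		vms->unmap_end = vms->next ? vms->next->vm_start
					   : USER_PGTABLES_CEILING;
	}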

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/internal.h |  2 ++
 mm/mmap.c     | 13 +++++++++----
 2 files changed, 11 insertions(+), 4 deletions(-)

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 90cab15c3b81..b0300cb22353 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1492,6 +1492,8 @@  struct vma_munmap_struct {
 	struct list_head *uf;		/* Userfaultfd list_head */
 	unsigned long start;		/* Aligned start addr */
 	unsigned long end;		/* Aligned end addr */
+	unsigned long unmap_start;	/* Unmap PTE start */
+	unsigned long unmap_end;	/* Unmap PTE end */
 	int vma_count;			/* Number of vmas that will be removed */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
diff --git a/mm/mmap.c b/mm/mmap.c
index ecf55d32e804..5efcba084e12 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -525,6 +525,8 @@  static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->vma_count = 0;
 	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
 	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
+	vms->unmap_start = FIRST_USER_ADDRESS;
+	vms->unmap_end = USER_PGTABLES_CEILING;
 }
 
 /*
@@ -2409,9 +2411,7 @@  static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
 	mas_set(mas, mt_start);
-	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-				 next ? next->vm_start : USER_PGTABLES_CEILING,
-				 mm_wr_locked);
+	free_pgtables(&tlb, mas, vma, start, end, mm_wr_locked);
 	tlb_finish_mmu(&tlb);
 }
 
@@ -2637,7 +2637,8 @@  static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	 */
 	mas_set(mas_detach, 1);
 	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
-		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+		     vms->unmap_start, vms->unmap_end, vms->vma_count,
+		     !vms->unlock);
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	/* Stat accounting */
@@ -2699,6 +2700,8 @@  static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 			goto start_split_failed;
 	}
 	vms->prev = vma_prev(vms->vmi);
+	if (vms->prev)
+		vms->unmap_start = vms->prev->vm_end;
 
 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
@@ -2757,6 +2760,8 @@  static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 	}
 
 	vms->next = vma_next(vms->vmi);
+	if (vms->next)
+		vms->unmap_end = vms->next->vm_start;
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */