
[RFC,4/5] mm/mmap: Change munmap to use vma_munmap_struct() for accounting and surrounding vmas

Message ID 20240531163217.1584450-5-Liam.Howlett@oracle.com (mailing list archive)
State RFC
Series Avoid MAP_FIXED gap exposure

Checks

Context Check Description
netdev/tree_selection success Not a local patch

Commit Message

Liam R. Howlett May 31, 2024, 4:32 p.m. UTC
Clean up the code by changing the munmap operation to use a structure
for the accounting and munmap variables.

Since remove_mt() is only called in one location and its contents will
be reduced to almost nothing, the remains of the function can be added
to vms_complete_munmap_vmas().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 mm/internal.h |  6 ++++
 mm/mmap.c     | 85 +++++++++++++++++++++++++++------------------------
 2 files changed, 51 insertions(+), 40 deletions(-)
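
For orientation before the discussion below: the statistics that remove_mt()
used to compute per-vma while freeing are now accumulated into struct
vma_munmap_struct during the gather phase and applied in one batch at
completion.  A minimal sketch of the completion side, assuming the field and
helper names from this patch (locking, the unmap step, and error paths are
elided):

/*
 * Illustrative sketch only -- the gather phase has already summed
 * nr_pages, locked_vm, nr_accounted, exec_vm, stack_vm and data_vm
 * across the detached vmas stored in mas_detach.
 */
static void complete_sketch(struct vma_munmap_struct *vms,
			    struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	/* Update the high watermark before total_vm is lowered. */
	update_hiwater_vm(vms->mm);

	/*
	 * One batched subtraction per counter replaces the per-vma
	 * vm_stat_account() calls that remove_mt() used to make.
	 */
	vms_vm_stat_account(vms);

	/* Freeing the detached vmas no longer does accounting work. */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, false);

	vm_unacct_memory(vms->nr_accounted);
}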

Comments

Suren Baghdasaryan June 7, 2024, 2:38 p.m. UTC | #1
On Fri, May 31, 2024 at 9:33 AM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
>
> Clean up the code by changing the munmap operation to use a structure
> for the accounting and munmap variables.
>
> Since remove_mt() is only called in one location and its contents will
> be reduced to almost nothing, the remains of the function can be added
> to vms_complete_munmap_vmas().
>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> ---
>  mm/internal.h |  6 ++++
>  mm/mmap.c     | 85 +++++++++++++++++++++++++++------------------------
>  2 files changed, 51 insertions(+), 40 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 6ebf77853d68..8c02ebf5736c 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1435,12 +1435,18 @@ struct vma_munmap_struct {
>         struct vma_iterator *vmi;
>         struct mm_struct *mm;
>         struct vm_area_struct *vma;     /* The first vma to munmap */
> +       struct vm_area_struct *next;    /* vma after the munmap area */
> +       struct vm_area_struct *prev;    /* vma before the munmap area */
>         struct list_head *uf;           /* Userfaultfd list_head */
>         unsigned long start;            /* Aligned start addr */
>         unsigned long end;              /* Aligned end addr */
>         int vma_count;                  /* Number of vmas that will be removed */
>         unsigned long nr_pages;         /* Number of pages being removed */
>         unsigned long locked_vm;        /* Number of locked pages */
> +       unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
> +       unsigned long exec_vm;
> +       unsigned long stack_vm;
> +       unsigned long data_vm;
>         bool unlock;                    /* Unlock after the munmap */
>  };
>
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 57f2383245ea..3e0930c09213 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -482,7 +482,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
>         vms->unlock = unlock;
>         vms->uf = uf;
>         vms->vma_count = 0;
> -       vms->nr_pages = vms->locked_vm = 0;
> +       vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
> +       vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
>  }
>
>  /*
> @@ -604,7 +605,6 @@ static inline void vma_complete(struct vma_prepare *vp,
>         }
>         if (vp->insert && vp->file)
>                 uprobe_mmap(vp->insert);
> -       validate_mm(mm);

vma_complete() is used in places other than vma_shrink(). You
effectively removed validate_mm() for all those other users. Is that
intentional? If so, that should be documented in the changelog.
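
For context on the comment above -- this caller list is an inference from the
mm/mmap.c of this era, not something stated in the patch itself:

/*
 * Illustrative call map, not part of the patch.  vma_complete() is the
 * common tail of several vma update paths, so removing validate_mm()
 * from it drops the check for every caller unless each call site
 * re-adds it, as vma_shrink() does below:
 *
 *   vma_expand() -> vma_complete()    validation silently dropped
 *   vma_shrink() -> vma_complete()    this patch re-adds validate_mm()
 *   vma_merge()  -> vma_complete()    validation silently dropped
 */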

>  }
>
>  /*
> @@ -733,6 +733,7 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
>         vma_iter_clear(vmi);
>         vma_set_range(vma, start, end, pgoff);
>         vma_complete(&vp, vmi, vma->vm_mm);
> +       validate_mm(vma->vm_mm);
>         return 0;
>  }
>
> @@ -2347,30 +2348,6 @@ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
>         return vma;
>  }
>
> -/*
> - * Ok - we have the memory areas we should free on a maple tree so release them,
> - * and do the vma updates.
> - *
> - * Called with the mm semaphore held.
> - */
> -static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
> -{
> -       unsigned long nr_accounted = 0;
> -       struct vm_area_struct *vma;
> -
> -       /* Update high watermark before we lower total_vm */
> -       update_hiwater_vm(mm);
> -       mas_for_each(mas, vma, ULONG_MAX) {
> -               long nrpages = vma_pages(vma);
> -
> -               if (vma->vm_flags & VM_ACCOUNT)
> -                       nr_accounted += nrpages;
> -               vm_stat_account(mm, vma->vm_flags, -nrpages);
> -               remove_vma(vma, false);
> -       }
> -       vm_unacct_memory(nr_accounted);
> -}
> -
>  /*
>   * Get rid of page table information in the indicated region.
>   *
> @@ -2625,13 +2602,14 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>                 if (error)
>                         goto start_split_failed;
>         }
> +       vms->prev = vma_prev(vms->vmi);
>
>         /*
>          * Detach a range of VMAs from the mm. Using next as a temp variable as
>          * it is always overwritten.
>          */
> -       next = vms->vma;
> -       do {
> +       for_each_vma_range(*(vms->vmi), next, vms->end) {
> +               long nrpages;
>                 /* Does it split the end? */
>                 if (next->vm_end > vms->end) {
>                         error = __split_vma(vms->vmi, next, vms->end, 0);
> @@ -2640,8 +2618,21 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>                 }
>                 vma_start_write(next);
>                 mas_set(mas_detach, vms->vma_count++);
> +               nrpages = vma_pages(next);
> +
> +               vms->nr_pages += nrpages;
>                 if (next->vm_flags & VM_LOCKED)
> -                       vms->locked_vm += vma_pages(next);
> +                       vms->locked_vm += nrpages;
> +
> +               if (next->vm_flags & VM_ACCOUNT)
> +                       vms->nr_accounted += nrpages;
> +
> +               if (is_exec_mapping(next->vm_flags))
> +                       vms->exec_vm += nrpages;
> +               else if (is_stack_mapping(next->vm_flags))
> +                       vms->stack_vm += nrpages;
> +               else if (is_data_mapping(next->vm_flags))
> +                       vms->data_vm += nrpages;
>
>                 error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
>                 if (error)
> @@ -2667,7 +2658,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>                 BUG_ON(next->vm_start < vms->start);
>                 BUG_ON(next->vm_start > vms->end);
>  #endif
> -       } for_each_vma_range(*(vms->vmi), next, vms->end);
> +       }
> +
> +       vms->next = vma_next(vms->vmi);
>
>  #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
>         /* Make sure no VMAs are about to be lost. */
> @@ -2712,10 +2705,11 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>   * @mas_detach: The maple state of the detached vmas
>   *
>   */
> +static inline void vms_vm_stat_account(struct vma_munmap_struct *vms);
>  static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
>                 struct ma_state *mas_detach)
>  {
> -       struct vm_area_struct *prev, *next;
> +       struct vm_area_struct *vma;
>         struct mm_struct *mm;
>
>         mm = vms->mm;
> @@ -2724,21 +2718,21 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
>         if (vms->unlock)
>                 mmap_write_downgrade(mm);
>
> -       prev = vma_iter_prev_range(vms->vmi);
> -       next = vma_next(vms->vmi);
> -       if (next)
> -               vma_iter_prev_range(vms->vmi);
> -
>         /*
>          * We can free page tables without write-locking mmap_lock because VMAs
>          * were isolated before we downgraded mmap_lock.
>          */
>         mas_set(mas_detach, 1);
> -       unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
> -                    vms->vma_count, !vms->unlock);
> -       /* Statistics and freeing VMAs */
> +       unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
> +                    vms->start, vms->end, vms->vma_count, !vms->unlock);
> +       /* Update high watermark before we lower total_vm */
> +       update_hiwater_vm(mm);
> +       vms_vm_stat_account(vms);
>         mas_set(mas_detach, 0);
> -       remove_mt(mm, mas_detach);
> +       mas_for_each(mas_detach, vma, ULONG_MAX)
> +               remove_vma(vma, false);
> +
> +       vm_unacct_memory(vms->nr_accounted);
>         validate_mm(mm);
>         if (vms->unlock)
>                 mmap_read_unlock(mm);
> @@ -3631,6 +3625,17 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
>                 mm->data_vm += npages;
>  }
>
> +/* Accounting for munmap */
> +static inline void vms_vm_stat_account(struct vma_munmap_struct *vms)
> +{
> +       struct mm_struct *mm = vms->mm;
> +
> +       WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
> +       mm->exec_vm -= vms->exec_vm;
> +       mm->stack_vm -= vms->stack_vm;
> +       mm->data_vm -= vms->data_vm;
> +}
> +
>  static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
>
>  /*
> --
> 2.43.0
>
Liam R. Howlett June 7, 2024, 3:24 p.m. UTC | #2
* Suren Baghdasaryan <surenb@google.com> [240607 10:38]:
> On Fri, May 31, 2024 at 9:33 AM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
> >
> > Clean up the code by changing the munmap operation to use a structure
> > for the accounting and munmap variables.
> >
> > Since remove_mt() is only called in one location and its contents will
> > be reduced to almost nothing, the remains of the function can be added
> > to vms_complete_munmap_vmas().
> >
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> > ---
> >  mm/internal.h |  6 ++++
> >  mm/mmap.c     | 85 +++++++++++++++++++++++++++------------------------
> >  2 files changed, 51 insertions(+), 40 deletions(-)
> >
> > diff --git a/mm/internal.h b/mm/internal.h
> > index 6ebf77853d68..8c02ebf5736c 100644
> > --- a/mm/internal.h
> > +++ b/mm/internal.h
> > @@ -1435,12 +1435,18 @@ struct vma_munmap_struct {
> >         struct vma_iterator *vmi;
> >         struct mm_struct *mm;
> >         struct vm_area_struct *vma;     /* The first vma to munmap */
> > +       struct vm_area_struct *next;    /* vma after the munmap area */
> > +       struct vm_area_struct *prev;    /* vma before the munmap area */
> >         struct list_head *uf;           /* Userfaultfd list_head */
> >         unsigned long start;            /* Aligned start addr */
> >         unsigned long end;              /* Aligned end addr */
> >         int vma_count;                  /* Number of vmas that will be removed */
> >         unsigned long nr_pages;         /* Number of pages being removed */
> >         unsigned long locked_vm;        /* Number of locked pages */
> > +       unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
> > +       unsigned long exec_vm;
> > +       unsigned long stack_vm;
> > +       unsigned long data_vm;
> >         bool unlock;                    /* Unlock after the munmap */
> >  };
> >
> > diff --git a/mm/mmap.c b/mm/mmap.c
> > index 57f2383245ea..3e0930c09213 100644
> > --- a/mm/mmap.c
> > +++ b/mm/mmap.c
> > @@ -482,7 +482,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
> >         vms->unlock = unlock;
> >         vms->uf = uf;
> >         vms->vma_count = 0;
> > -       vms->nr_pages = vms->locked_vm = 0;
> > +       vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
> > +       vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
> >  }
> >
> >  /*
> > @@ -604,7 +605,6 @@ static inline void vma_complete(struct vma_prepare *vp,
> >         }
> >         if (vp->insert && vp->file)
> >                 uprobe_mmap(vp->insert);
> > -       validate_mm(mm);
> 
> vma_complete() is used in places other than vma_shrink(). You
> effectively removed validate_mm() for all those other users. Is that
> intentional? If so, that should be documented in the changelog.

Oh, right.  Yes.  This needs to be extracted into its own patch.

We cannot validate_mm() in vma_complete() due to vma_expand() being
called prior to the completion of the MAP_FIXED munmap.

I will extract this into its own patch for the next revision.
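
A condensed sketch of the ordering being described, assuming the gather and
complete entry points from earlier in this series (illustrative pseudocode;
argument lists are abbreviated, not the exact signatures):

/*
 * MAP_FIXED over an existing range, as this series arranges it.
 * Between gather and complete the old vmas are detached but their
 * region is not yet unmapped, so a validate_mm() reached via
 * vma_expand() -> vma_complete() would examine an intermediate tree.
 */
vms_gather_munmap_vmas(&vms, &mas_detach);    /* detach the old range  */
vma_expand(vmi, vma, start, end, pgoff, next);/* calls vma_complete()  */
vms_complete_munmap_vmas(&vms, &mas_detach);  /* unmap, account, free  */
/* Only here is the mm consistent enough for validate_mm(mm). */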


Patch

diff --git a/mm/internal.h b/mm/internal.h
index 6ebf77853d68..8c02ebf5736c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1435,12 +1435,18 @@  struct vma_munmap_struct {
 	struct vma_iterator *vmi;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;	/* The first vma to munmap */
+	struct vm_area_struct *next;	/* vma after the munmap area */
+	struct vm_area_struct *prev;    /* vma before the munmap area */
 	struct list_head *uf;		/* Userfaultfd list_head */
 	unsigned long start;		/* Aligned start addr */
 	unsigned long end;		/* Aligned end addr */
 	int vma_count;			/* Number of vmas that will be removed */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
+	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
+	unsigned long exec_vm;
+	unsigned long stack_vm;
+	unsigned long data_vm;
 	bool unlock;			/* Unlock after the munmap */
 };
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 57f2383245ea..3e0930c09213 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -482,7 +482,8 @@  static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->unlock = unlock;
 	vms->uf = uf;
 	vms->vma_count = 0;
-	vms->nr_pages = vms->locked_vm = 0;
+	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
 }
 
 /*
@@ -604,7 +605,6 @@  static inline void vma_complete(struct vma_prepare *vp,
 	}
 	if (vp->insert && vp->file)
 		uprobe_mmap(vp->insert);
-	validate_mm(mm);
 }
 
 /*
@@ -733,6 +733,7 @@  int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	vma_iter_clear(vmi);
 	vma_set_range(vma, start, end, pgoff);
 	vma_complete(&vp, vmi, vma->vm_mm);
+	validate_mm(vma->vm_mm);
 	return 0;
 }
 
@@ -2347,30 +2348,6 @@  struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
 	return vma;
 }
 
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
-	unsigned long nr_accounted = 0;
-	struct vm_area_struct *vma;
-
-	/* Update high watermark before we lower total_vm */
-	update_hiwater_vm(mm);
-	mas_for_each(mas, vma, ULONG_MAX) {
-		long nrpages = vma_pages(vma);
-
-		if (vma->vm_flags & VM_ACCOUNT)
-			nr_accounted += nrpages;
-		vm_stat_account(mm, vma->vm_flags, -nrpages);
-		remove_vma(vma, false);
-	}
-	vm_unacct_memory(nr_accounted);
-}
-
 /*
  * Get rid of page table information in the indicated region.
  *
@@ -2625,13 +2602,14 @@  static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		if (error)
 			goto start_split_failed;
 	}
+	vms->prev = vma_prev(vms->vmi);
 
 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
 	 * it is always overwritten.
 	 */
-	next = vms->vma;
-	do {
+	for_each_vma_range(*(vms->vmi), next, vms->end) {
+		long nrpages;
 		/* Does it split the end? */
 		if (next->vm_end > vms->end) {
 			error = __split_vma(vms->vmi, next, vms->end, 0);
@@ -2640,8 +2618,21 @@  static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		}
 		vma_start_write(next);
 		mas_set(mas_detach, vms->vma_count++);
+		nrpages = vma_pages(next);
+
+		vms->nr_pages += nrpages;
 		if (next->vm_flags & VM_LOCKED)
-			vms->locked_vm += vma_pages(next);
+			vms->locked_vm += nrpages;
+
+		if (next->vm_flags & VM_ACCOUNT)
+			vms->nr_accounted += nrpages;
+
+		if (is_exec_mapping(next->vm_flags))
+			vms->exec_vm += nrpages;
+		else if (is_stack_mapping(next->vm_flags))
+			vms->stack_vm += nrpages;
+		else if (is_data_mapping(next->vm_flags))
+			vms->data_vm += nrpages;
 
 		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
 		if (error)
@@ -2667,7 +2658,9 @@  static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		BUG_ON(next->vm_start < vms->start);
 		BUG_ON(next->vm_start > vms->end);
 #endif
-	} for_each_vma_range(*(vms->vmi), next, vms->end);
+	}
+
+	vms->next = vma_next(vms->vmi);
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
@@ -2712,10 +2705,11 @@  static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
  * @mas_detach: The maple state of the detached vmas
  *
  */
+static inline void vms_vm_stat_account(struct vma_munmap_struct *vms);
 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach)
 {
-	struct vm_area_struct *prev, *next;
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 
 	mm = vms->mm;
@@ -2724,21 +2718,21 @@  static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	if (vms->unlock)
 		mmap_write_downgrade(mm);
 
-	prev = vma_iter_prev_range(vms->vmi);
-	next = vma_next(vms->vmi);
-	if (next)
-		vma_iter_prev_range(vms->vmi);
-
 	/*
 	 * We can free page tables without write-locking mmap_lock because VMAs
 	 * were isolated before we downgraded mmap_lock.
 	 */
 	mas_set(mas_detach, 1);
-	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
-		     vms->vma_count, !vms->unlock);
-	/* Statistics and freeing VMAs */
+	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
+		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+	/* Update high watermark before we lower total_vm */
+	update_hiwater_vm(mm);
+	vms_vm_stat_account(vms);
 	mas_set(mas_detach, 0);
-	remove_mt(mm, mas_detach);
+	mas_for_each(mas_detach, vma, ULONG_MAX)
+		remove_vma(vma, false);
+
+	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
 	if (vms->unlock)
 		mmap_read_unlock(mm);
@@ -3631,6 +3625,17 @@  void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
 		mm->data_vm += npages;
 }
 
+/* Accounting for munmap */
+static inline void vms_vm_stat_account(struct vma_munmap_struct *vms)
+{
+	struct mm_struct *mm = vms->mm;
+
+	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+	mm->exec_vm -= vms->exec_vm;
+	mm->stack_vm -= vms->stack_vm;
+	mm->data_vm -= vms->data_vm;
+}
+
 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
 
 /*