[mmotm] mm/thp: fix NR_FILE_MAPPED accounting in page_*_file_rmap()

Message ID e02e52a1-8550-a57c-ed29-f51191ea2375@google.com (mailing list archive)
State New
Series [mmotm] mm/thp: fix NR_FILE_MAPPED accounting in page_*_file_rmap()

Commit Message

Hugh Dickins March 3, 2022, 1:46 a.m. UTC
NR_FILE_MAPPED accounting in mm/rmap.c (for /proc/meminfo "Mapped" and
/proc/vmstat "nr_mapped" and the memcg's memory.stat "mapped_file") is
slightly flawed for file or shmem huge pages.

It is well thought out, and looks convincing, but there's a racy case
when the careful counting in page_remove_file_rmap() (without page lock)
gets discarded.  So in a workload like two "make -j20" kernel builds
under memory pressure, with cc1 on hugepage text, "Mapped" can easily
grow by a spurious 5MB or more on each iteration, ending up implausibly
bigger than most other numbers in /proc/meminfo.  And, hypothetically,
it might grow to the point of seriously interfering with mm/vmscan.c's
heuristics, which do take NR_FILE_MAPPED into some consideration.
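
Condensed, the pre-fix compound path of page_remove_file_rmap() (a
sketch only: the PTE-mapped else branch and the PMD-mapped counter
updates are trimmed) shows where the count goes missing:

	/* sketch: page_remove_file_rmap() before this patch */
	int i, nr = 1;
	...
	if (compound && PageTransHuge(page)) {
		int nr_pages = thp_nr_pages(page);

		/* counted without page lock, racing with PTE unmaps */
		for (i = 0, nr = 0; i < nr_pages; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		/* still PMD-mapped elsewhere? return early, silently
		 * discarding the nr counted just above */
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			return;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);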

Fixed by moving the __mod_lruvec_page_state() down to where it will not
be missed before return (and I've grown a bit tired of that oft-repeated
but-not-everywhere comment on the __ness: it gets lost in the move here).
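
The patched shape (in full in the diff below) funnels every path
through a single exit, so nothing counted can be discarded:

	int i, nr = 0;	/* was 1; each counting path increments it */
	...
	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		goto out;	/* instead of return: nr still applied */
	...
out:
	if (nr)
		__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);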

Does page_add_file_rmap() need the same change?  I suspect not, because
page lock is held in all relevant cases, and its skipping case looks safe;
but it's much easier to be sure if we do make the same change.
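
For reference, the skipping case in question is the PTE path of
page_add_file_rmap(), pre-fix (condensed from the diff below):

	if (!atomic_inc_and_test(&page->_mapcount))
		goto out;	/* not the first mapping, so no change
				 * to NR_FILE_MAPPED is due anyway */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);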

Fixes: dd78fedde4b9 ("rmap: support file thp")
Signed-off-by: Hugh Dickins <hughd@google.com>
---
If this were thought serious enough to backport (I don't feel strongly,
but it is something I keep in my own trees), it needs a little more care
near "out", because the mm/munlock series has removed some action there.

 mm/rmap.c | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)

Comments

Yang Shi March 3, 2022, 8:08 p.m. UTC | #1
On Wed, Mar 2, 2022 at 5:46 PM Hugh Dickins <hughd@google.com> wrote:
>
> NR_FILE_MAPPED accounting in mm/rmap.c (for /proc/meminfo "Mapped" and
> /proc/vmstat "nr_mapped" and the memcg's memory.stat "mapped_file") is
> slightly flawed for file or shmem huge pages.
[...]
> Fixes: dd78fedde4b9 ("rmap: support file thp")
> Signed-off-by: Hugh Dickins <hughd@google.com>

Reviewed-by: Yang Shi <shy828301@gmail.com>

Patch

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1238,14 +1238,14 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page,
 	struct vm_area_struct *vma, bool compound)
 {
-	int i, nr = 1;
+	int i, nr = 0;
 
 	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
 	if (compound && PageTransHuge(page)) {
 		int nr_pages = thp_nr_pages(page);
 
-		for (i = 0, nr = 0; i < nr_pages; i++) {
+		for (i = 0; i < nr_pages; i++) {
 			if (atomic_inc_and_test(&page[i]._mapcount))
 				nr++;
 		}
@@ -1262,11 +1262,12 @@ void page_add_file_rmap(struct page *page,
 			VM_WARN_ON_ONCE(!PageLocked(page));
 			SetPageDoubleMap(compound_head(page));
 		}
-		if (!atomic_inc_and_test(&page->_mapcount))
-			goto out;
+		if (atomic_inc_and_test(&page->_mapcount))
+			nr++;
 	}
-	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
 out:
+	if (nr)
+		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
 	unlock_page_memcg(page);
 
 	mlock_vma_page(page, vma, compound);
@@ -1274,7 +1275,7 @@ void page_add_file_rmap(struct page *page,
 
 static void page_remove_file_rmap(struct page *page, bool compound)
 {
-	int i, nr = 1;
+	int i, nr = 0;
 
 	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
 
@@ -1289,12 +1290,12 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	if (compound && PageTransHuge(page)) {
 		int nr_pages = thp_nr_pages(page);
 
-		for (i = 0, nr = 0; i < nr_pages; i++) {
+		for (i = 0; i < nr_pages; i++) {
 			if (atomic_add_negative(-1, &page[i]._mapcount))
 				nr++;
 		}
 		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
-			return;
+			goto out;
 		if (PageSwapBacked(page))
 			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
 						-nr_pages);
@@ -1302,16 +1303,12 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
 						-nr_pages);
 	} else {
-		if (!atomic_add_negative(-1, &page->_mapcount))
-			return;
+		if (atomic_add_negative(-1, &page->_mapcount))
+			nr++;
 	}
-
-	/*
-	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
-	 * these counters are not modified in interrupt context, and
-	 * pte lock(a spinlock) is held, which implies preemption disabled.
-	 */
-	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
+out:
+	if (nr)
+		__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
 }
 
 static void page_remove_anon_compound_rmap(struct page *page)