
[2/2] mm: fix numa stats for thp migration

Message ID 20201227181310.3235210-2-shakeelb@google.com (mailing list archive)
State New, archived
Series [1/2] mm: memcg: fix memcg file_dirty numa stat

Commit Message

Shakeel Butt Dec. 27, 2020, 6:13 p.m. UTC
Currently the kernel does not correctly update the numa stats for
NR_FILE_PAGES and NR_SHMEM on THP migration. Fix that. For NR_FILE_DIRTY
and NR_ZONE_WRITE_PENDING there is no need to handle THP migration yet,
since the kernel does not have write support for file THP, but to be
more future proof this patch adds THP support for those stats as well.

Fixes: e71769ae52609 ("mm: enable thp migration for shmem thp")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Cc: <stable@vger.kernel.org>
---
 mm/migrate.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
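
For context, the accounting error is purely arithmetic: the old code moved
the per-node counters by a single base page regardless of the compound page
size, so migrating a PMD-sized THP left the source node overcounted and the
destination node undercounted by HPAGE_PMD_NR - 1 pages. The userspace
sketch below is not kernel code; it only models the difference between the
old __dec/__inc pattern and the patched __mod_*(..., nr) pattern, assuming a
512-page PMD THP as on x86-64. The structure and function names in it are
illustrative.

/*
 * Minimal userspace sketch (illustrative names only) of the stat movement
 * on THP migration: the old code moved 1 base page per counter, the
 * patched code moves thp_nr_pages(page) base pages.
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* base pages per PMD-sized THP on x86-64 */

struct node_stats { long nr_file_pages; };

/* old behaviour: one base page moved regardless of the page's size */
static void migrate_stats_old(struct node_stats *src, struct node_stats *dst)
{
	src->nr_file_pages -= 1;
	dst->nr_file_pages += 1;
}

/* patched behaviour: move the whole compound page worth of base pages */
static void migrate_stats_fixed(struct node_stats *src, struct node_stats *dst,
				int nr)
{
	src->nr_file_pages -= nr;
	dst->nr_file_pages += nr;
}

int main(void)
{
	struct node_stats node0 = { .nr_file_pages = HPAGE_PMD_NR };
	struct node_stats node1 = { .nr_file_pages = 0 };

	migrate_stats_old(&node0, &node1);
	printf("old:   node0=%ld node1=%ld\n",
	       node0.nr_file_pages, node1.nr_file_pages);	/* 511 / 1 */

	node0.nr_file_pages = HPAGE_PMD_NR;
	node1.nr_file_pages = 0;
	migrate_stats_fixed(&node0, &node1, HPAGE_PMD_NR);
	printf("fixed: node0=%ld node1=%ld\n",
	       node0.nr_file_pages, node1.nr_file_pages);	/* 0 / 512 */

	return 0;
}

The same reasoning applies to NR_SHMEM, NR_FILE_DIRTY and
NR_ZONE_WRITE_PENDING in the hunks below.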

Comments

Shakeel Butt Dec. 27, 2020, 6:16 p.m. UTC | #1
On Sun, Dec 27, 2020 at 10:14 AM Shakeel Butt <shakeelb@google.com> wrote:
>
> Currently the kernel does not correctly update the numa stats for
> NR_FILE_PAGES and NR_SHMEM on THP migration. Fix that. For NR_FILE_DIRTY
> and NR_ZONE_WRITE_PENDING there is no need to handle THP migration yet,
> since the kernel does not have write support for file THP, but to be
> more future proof this patch adds THP support for those stats as well.
>
> Fixes: e71769ae52609 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Shakeel Butt <shakeelb@google.com>
> Cc: <stable@vger.kernel.org>
> ---
>  mm/migrate.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 613794f6a433..ade163c6ecdf 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
>         struct zone *oldzone, *newzone;
>         int dirty;
>         int expected_count = expected_page_refs(mapping, page) + extra_count;
> +       int nr = thp_nr_pages(page);
>
>         if (!mapping) {
>                 /* Anonymous page without mapping */
> @@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
>          */
>         newpage->index = page->index;
>         newpage->mapping = page->mapping;
> -       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
> +       page_ref_add(newpage, nr); /* add cache reference */
>         if (PageSwapBacked(page)) {
>                 __SetPageSwapBacked(newpage);
>                 if (PageSwapCache(page)) {
> @@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
>         if (PageTransHuge(page)) {
>                 int i;
>
> -               for (i = 1; i < HPAGE_PMD_NR; i++) {
> +               for (i = 1; i < nr; i++) {
>                         xas_next(&xas);
>                         xas_store(&xas, newpage);
>                 }
> @@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
>          * to one less reference.
>          * We know this isn't the last reference.
>          */
> -       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
> +       page_ref_unfreeze(page, expected_count - nr);
>
>         xas_unlock(&xas);
>         /* Leave irq disabled to prevent preemption while updating stats */
> @@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
>                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
>                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
>
> -               __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
> -               __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
> +               __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
> +               __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
>                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
> -                       __dec_lruvec_state(old_lruvec, NR_SHMEM);
> -                       __inc_lruvec_state(new_lruvec, NR_SHMEM);
> +                       __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
> +                       __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
>                 }
>                 if (dirty && mapping_can_writeback(mapping)) {
> -                       __dec_lruvec_state(old_lruvec, NR_FILE_DIRTY);
> -                       __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
> -                       __inc_lruvec_state(new_lruvec, NR_FILE_DIRTY);
> -                       __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
> +                       __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
> +                       __mod_zone_page_tate(oldzone, NR_ZONE_WRITE_PENDING, -nr);

This should be __mod_zone_page_state(). I fixed it locally but sent the
older patch by mistake.
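
For reference, with that typo fixed the old-zone side of the dirty/writeback
hunk would read (mirroring the __mod_zone_page_state() call already used for
newzone just below):

			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);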

> +                       __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
> +                       __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
>                 }
>         }
>         local_irq_enable();
> --
> 2.29.2.729.g45daf8777d-goog
>
Yang Shi Dec. 28, 2020, 5:31 p.m. UTC | #2
On Sun, Dec 27, 2020 at 10:16 AM Shakeel Butt <shakeelb@google.com> wrote:
>
> On Sun, Dec 27, 2020 at 10:14 AM Shakeel Butt <shakeelb@google.com> wrote:
> >
> > Currently the kernel does not correctly update the numa stats for
> > NR_FILE_PAGES and NR_SHMEM on THP migration. Fix that. For NR_FILE_DIRTY
> > and NR_ZONE_WRITE_PENDING there is no need to handle THP migration yet,
> > since the kernel does not have write support for file THP, but to be
> > more future proof this patch adds THP support for those stats as well.
> >
> > Fixes: e71769ae52609 ("mm: enable thp migration for shmem thp")
> > Signed-off-by: Shakeel Butt <shakeelb@google.com>
> > Cc: <stable@vger.kernel.org>
> > ---
> >  mm/migrate.c | 23 ++++++++++++-----------
> >  1 file changed, 12 insertions(+), 11 deletions(-)
> >
> > diff --git a/mm/migrate.c b/mm/migrate.c
> > index 613794f6a433..ade163c6ecdf 100644
> > --- a/mm/migrate.c
> > +++ b/mm/migrate.c
> > @@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
> >         struct zone *oldzone, *newzone;
> >         int dirty;
> >         int expected_count = expected_page_refs(mapping, page) + extra_count;
> > +       int nr = thp_nr_pages(page);
> >
> >         if (!mapping) {
> >                 /* Anonymous page without mapping */
> > @@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
> >          */
> >         newpage->index = page->index;
> >         newpage->mapping = page->mapping;
> > -       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
> > +       page_ref_add(newpage, nr); /* add cache reference */
> >         if (PageSwapBacked(page)) {
> >                 __SetPageSwapBacked(newpage);
> >                 if (PageSwapCache(page)) {
> > @@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
> >         if (PageTransHuge(page)) {
> >                 int i;
> >
> > -               for (i = 1; i < HPAGE_PMD_NR; i++) {
> > +               for (i = 1; i < nr; i++) {
> >                         xas_next(&xas);
> >                         xas_store(&xas, newpage);
> >                 }
> > @@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
> >          * to one less reference.
> >          * We know this isn't the last reference.
> >          */
> > -       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
> > +       page_ref_unfreeze(page, expected_count - nr);
> >
> >         xas_unlock(&xas);
> >         /* Leave irq disabled to prevent preemption while updating stats */
> > @@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
> >                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
> >                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
> >
> > -               __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
> > -               __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
> > +               __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
> > +               __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
> >                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
> > -                       __dec_lruvec_state(old_lruvec, NR_SHMEM);
> > -                       __inc_lruvec_state(new_lruvec, NR_SHMEM);
> > +                       __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
> > +                       __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
> >                 }
> >                 if (dirty && mapping_can_writeback(mapping)) {
> > -                       __dec_lruvec_state(old_lruvec, NR_FILE_DIRTY);
> > -                       __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
> > -                       __inc_lruvec_state(new_lruvec, NR_FILE_DIRTY);
> > -                       __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
> > +                       __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
> > +                       __mod_zone_page_tate(oldzone, NR_ZONE_WRITE_PENDING, -nr);
>
> This should be __mod_zone_page_state(). I fixed it locally but sent the
> older patch by mistake.

Acked-by: Yang Shi <shy828301@gmail.com>

>
> > +                       __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
> > +                       __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
> >                 }
> >         }
> >         local_irq_enable();
> > --
> > 2.29.2.729.g45daf8777d-goog
> >
>
Roman Gushchin Dec. 28, 2020, 7:44 p.m. UTC | #3
On Sun, Dec 27, 2020 at 10:13:10AM -0800, Shakeel Butt wrote:
> Currently the kernel does not correctly update the numa stats for
> NR_FILE_PAGES and NR_SHMEM on THP migration. Fix that. For NR_FILE_DIRTY
> and NR_ZONE_WRITE_PENDING there is no need to handle THP migration yet,
> since the kernel does not have write support for file THP, but to be
> more future proof this patch adds THP support for those stats as well.
> 
> Fixes: e71769ae52609 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Shakeel Butt <shakeelb@google.com>
> Cc: <stable@vger.kernel.org>

With the typo fix ("__mod_zone_page_tate")

Reviewed-by: Roman Gushchin <guro@fb.com>

Thanks!

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index 613794f6a433..ade163c6ecdf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -402,6 +402,7 @@  int migrate_page_move_mapping(struct address_space *mapping,
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = expected_page_refs(mapping, page) + extra_count;
+	int nr = thp_nr_pages(page);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -437,7 +438,7 @@  int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@  int migrate_page_move_mapping(struct address_space *mapping,
 	if (PageTransHuge(page)) {
 		int i;
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
 			xas_store(&xas, newpage);
 		}
@@ -470,7 +471,7 @@  int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@  int migrate_page_move_mapping(struct address_space *mapping,
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_lruvec_state(old_lruvec, NR_SHMEM);
-			__inc_lruvec_state(new_lruvec, NR_SHMEM);
+			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_can_writeback(mapping)) {
-			__dec_lruvec_state(old_lruvec, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_lruvec_state(new_lruvec, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_tate(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();