
[2/3] mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config

Message ID 20210709000509.2618345-3-surenb@google.com (mailing list archive)
State New
Series mm, memcg: Optimizations to minimize overhead when memcgs are disabled

Commit Message

Suren Baghdasaryan July 9, 2021, 12:05 a.m. UTC
Inline mem_cgroup_{charge/uncharge} and mem_cgroup_uncharge_list functions
to perform the mem_cgroup_disabled static key check inline before calling
the main body of the function. This minimizes the memcg overhead in the
pagefault and exit_mmap paths when memcgs are disabled using the
cgroup_disable=memory command-line option.
This change results in ~0.4% overhead reduction when running the PFT test
comparing {CONFIG_MEMCG=n} against {CONFIG_MEMCG=y, cgroup_disable=memory}
configurations on an 8-core ARM64 Android device.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/memcontrol.h | 54 ++++++++++++++++++++++++++++++++++----
 mm/memcontrol.c            | 43 +++---------------------------
 2 files changed, 53 insertions(+), 44 deletions(-)
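
The saving comes from mem_cgroup_disabled() being a static-key test: with
CONFIG_JUMP_LABEL it compiles down to a patched jump rather than a load and
compare, so once the check is inlined, the disabled path costs almost
nothing at the call site. A minimal sketch of the pattern, using
hypothetical my_*/key names rather than the real memcontrol.h definitions:

#include <linux/jump_label.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/* One key for the subsystem, flipped once during early boot. */
DEFINE_STATIC_KEY_FALSE(my_memcg_disabled_key);

static inline bool my_memcg_disabled(void)
{
	/* A patched branch, not a memory load and compare. */
	return static_branch_unlikely(&my_memcg_disabled_key);
}

int __my_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);

static inline int my_charge(struct page *page, struct mm_struct *mm,
			    gfp_t gfp_mask)
{
	if (my_memcg_disabled())
		return 0;	/* no function call, no argument setup */
	return __my_charge(page, mm, gfp_mask);
}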

Comments

Johannes Weiner July 9, 2021, 2:48 p.m. UTC | #1
On Thu, Jul 08, 2021 at 05:05:08PM -0700, Suren Baghdasaryan wrote:
> Inline mem_cgroup_{charge/uncharge} and mem_cgroup_uncharge_list functions
> to perform the mem_cgroup_disabled static key check inline before calling
> the main body of the function. This minimizes the memcg overhead in the
> pagefault and exit_mmap paths when memcgs are disabled using the
> cgroup_disable=memory command-line option.
> This change results in ~0.4% overhead reduction when running the PFT test
> comparing {CONFIG_MEMCG=n} against {CONFIG_MEMCG=y, cgroup_disable=memory}
> configurations on an 8-core ARM64 Android device.
> 
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Sounds reasonable to me as well. One comment:

> @@ -693,13 +693,59 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
>  		page_counter_read(&memcg->memory);
>  }
>  
> -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
> +struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
> +
> +int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
> +			gfp_t gfp);
> +/**
> + * mem_cgroup_charge - charge a newly allocated page to a cgroup
> + * @page: page to charge
> + * @mm: mm context of the victim
> + * @gfp_mask: reclaim mode
> + *
> + * Try to charge @page to the memcg that @mm belongs to, reclaiming
> + * pages according to @gfp_mask if necessary. if @mm is NULL, try to
> + * charge to the active memcg.
> + *
> + * Do not use this for pages allocated for swapin.
> + *
> + * Returns 0 on success. Otherwise, an error code is returned.
> + */
> +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> +				    gfp_t gfp_mask)
> +{
> +	struct mem_cgroup *memcg;
> +	int ret;
> +
> +	if (mem_cgroup_disabled())
> +		return 0;
> +
> +	memcg = get_mem_cgroup_from_mm(mm);
> +	ret = __mem_cgroup_charge(page, memcg, gfp_mask);
> +	css_put(&memcg->css);
> +
> +	return ret;

Why not do

int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask);

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;

	return __mem_cgroup_charge(page, mm, gfp_mask);
}

like in the other cases as well?

That would avoid inlining two separate function calls into all the
callsites...

There is an (internal) __mem_cgroup_charge() already, but you can
rename it charge_memcg().
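
As a sketch, the mm/memcontrol.c side of that suggestion would look roughly
like this (names per the proposal above; the body of charge_memcg() is the
old internal __mem_cgroup_charge(), elided here, and the final v2 may
differ in details):

static int charge_memcg(struct page *page, struct mem_cgroup *memcg,
			gfp_t gfp)
{
	/* former internal __mem_cgroup_charge() body, unchanged */
	...
}

int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = get_mem_cgroup_from_mm(mm);
	ret = charge_memcg(page, memcg, gfp_mask);
	css_put(&memcg->css);

	return ret;
}

The memcontrol.h inline wrapper then reduces to the mem_cgroup_disabled()
check plus a single tail call, so each call site inlines one branch and one
call instead of the get_mem_cgroup_from_mm()/__mem_cgroup_charge()/css_put()
sequence.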
Suren Baghdasaryan July 9, 2021, 3:18 p.m. UTC | #2
On Fri, Jul 9, 2021 at 7:48 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Thu, Jul 08, 2021 at 05:05:08PM -0700, Suren Baghdasaryan wrote:
> > Inline mem_cgroup_{charge/uncharge} and mem_cgroup_uncharge_list functions
> > to perform the mem_cgroup_disabled static key check inline before calling
> > the main body of the function. This minimizes the memcg overhead in the
> > pagefault and exit_mmap paths when memcgs are disabled using the
> > cgroup_disable=memory command-line option.
> > This change results in ~0.4% overhead reduction when running the PFT test
> > comparing {CONFIG_MEMCG=n} against {CONFIG_MEMCG=y, cgroup_disable=memory}
> > configurations on an 8-core ARM64 Android device.
> >
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
>
> Sounds reasonable to me as well. One comment:
>
> > @@ -693,13 +693,59 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
> >               page_counter_read(&memcg->memory);
> >  }
> >
> > -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
> > +struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
> > +
> > +int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
> > +                     gfp_t gfp);
> > +/**
> > + * mem_cgroup_charge - charge a newly allocated page to a cgroup
> > + * @page: page to charge
> > + * @mm: mm context of the victim
> > + * @gfp_mask: reclaim mode
> > + *
> > + * Try to charge @page to the memcg that @mm belongs to, reclaiming
> > + * pages according to @gfp_mask if necessary. if @mm is NULL, try to
> > + * charge to the active memcg.
> > + *
> > + * Do not use this for pages allocated for swapin.
> > + *
> > + * Returns 0 on success. Otherwise, an error code is returned.
> > + */
> > +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> > +                                 gfp_t gfp_mask)
> > +{
> > +     struct mem_cgroup *memcg;
> > +     int ret;
> > +
> > +     if (mem_cgroup_disabled())
> > +             return 0;
> > +
> > +     memcg = get_mem_cgroup_from_mm(mm);
> > +     ret = __mem_cgroup_charge(page, memcg, gfp_mask);
> > +     css_put(&memcg->css);
> > +
> > +     return ret;
>
> Why not do
>
> int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
>                         gfp_t gfp_mask);
>
> static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
>                                     gfp_t gfp_mask)
> {
>         if (mem_cgroup_disabled())
>                 return 0;
>
> >         return __mem_cgroup_charge(page, mm, gfp_mask);
> }
>
> like in the other cases as well?
>
> That would avoid inlining two separate function calls into all the
> callsites...
>
> There is an (internal) __mem_cgroup_charge() already, but you can
> rename it charge_memcg().

Sounds good. I'll post an updated version with your suggestion.
Thanks for the review, Johannes!

>
Suren Baghdasaryan July 9, 2021, 5:17 p.m. UTC | #3
On Fri, Jul 9, 2021 at 8:18 AM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Fri, Jul 9, 2021 at 7:48 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
> >
> > On Thu, Jul 08, 2021 at 05:05:08PM -0700, Suren Baghdasaryan wrote:
> > > Inline mem_cgroup_{charge/uncharge} and mem_cgroup_uncharge_list functions
> > > to perform the mem_cgroup_disabled static key check inline before calling
> > > the main body of the function. This minimizes the memcg overhead in the
> > > pagefault and exit_mmap paths when memcgs are disabled using the
> > > cgroup_disable=memory command-line option.
> > > This change results in ~0.4% overhead reduction when running the PFT test
> > > comparing {CONFIG_MEMCG=n} against {CONFIG_MEMCG=y, cgroup_disable=memory}
> > > configurations on an 8-core ARM64 Android device.
> > >
> > > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> >
> > Sounds reasonable to me as well. One comment:
> >
> > > @@ -693,13 +693,59 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
> > >               page_counter_read(&memcg->memory);
> > >  }
> > >
> > > -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
> > > +struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
> > > +
> > > +int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
> > > +                     gfp_t gfp);
> > > +/**
> > > + * mem_cgroup_charge - charge a newly allocated page to a cgroup
> > > + * @page: page to charge
> > > + * @mm: mm context of the victim
> > > + * @gfp_mask: reclaim mode
> > > + *
> > > + * Try to charge @page to the memcg that @mm belongs to, reclaiming
> > > + * pages according to @gfp_mask if necessary. if @mm is NULL, try to
> > > + * charge to the active memcg.
> > > + *
> > > + * Do not use this for pages allocated for swapin.
> > > + *
> > > + * Returns 0 on success. Otherwise, an error code is returned.
> > > + */
> > > +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> > > +                                 gfp_t gfp_mask)
> > > +{
> > > +     struct mem_cgroup *memcg;
> > > +     int ret;
> > > +
> > > +     if (mem_cgroup_disabled())
> > > +             return 0;
> > > +
> > > +     memcg = get_mem_cgroup_from_mm(mm);
> > > +     ret = __mem_cgroup_charge(page, memcg, gfp_mask);
> > > +     css_put(&memcg->css);
> > > +
> > > +     return ret;
> >
> > Why not do
> >
> > int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> >                         gfp_t gfp_mask);
> >
> > static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> >                                     gfp_t gfp_mask)
> > {
> >         if (mem_cgroup_disabled())
> >                 return 0;
> >
> > >         return __mem_cgroup_charge(page, mm, gfp_mask);
> > }
> >
> > like in the other cases as well?
> >
> > That would avoid inlining two separate function calls into all the
> > callsites...
> >
> > There is an (internal) __mem_cgroup_charge() already, but you can
> > rename it charge_memcg().
>
> Sounds good. I'll post an updated version with your suggestion.
> Thanks for the review, Johannes!

Posted v2 just for this patch at
https://lore.kernel.org/patchwork/patch/1455550 . Please let me know
if you want me to resend the whole patchset instead of just this
patch.
>
> >

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bfe5c486f4ad..480815feb116 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -693,13 +693,59 @@  static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
 		page_counter_read(&memcg->memory);
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
+struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
+
+int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
+			gfp_t gfp);
+/**
+ * mem_cgroup_charge - charge a newly allocated page to a cgroup
+ * @page: page to charge
+ * @mm: mm context of the victim
+ * @gfp_mask: reclaim mode
+ *
+ * Try to charge @page to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp_mask if necessary. if @mm is NULL, try to
+ * charge to the active memcg.
+ *
+ * Do not use this for pages allocated for swapin.
+ *
+ * Returns 0 on success. Otherwise, an error code is returned.
+ */
+static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+				    gfp_t gfp_mask)
+{
+	struct mem_cgroup *memcg;
+	int ret;
+
+	if (mem_cgroup_disabled())
+		return 0;
+
+	memcg = get_mem_cgroup_from_mm(mm);
+	ret = __mem_cgroup_charge(page, memcg, gfp_mask);
+	css_put(&memcg->css);
+
+	return ret;
+}
+
 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+void __mem_cgroup_uncharge(struct page *page);
+static inline void mem_cgroup_uncharge(struct page *page)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge(page);
+}
+
+void __mem_cgroup_uncharge_list(struct list_head *page_list);
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge_list(page_list);
+}
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
@@ -756,8 +802,6 @@  static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
-struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
-
 struct lruvec *lock_page_lruvec(struct page *page);
 struct lruvec *lock_page_lruvec_irq(struct page *page);
 struct lruvec *lock_page_lruvec_irqsave(struct page *page,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a228cd51c4bd..da677b55b2fe 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6701,8 +6701,8 @@  void mem_cgroup_calculate_protection(struct mem_cgroup *root,
 			atomic_long_read(&parent->memory.children_low_usage)));
 }
 
-static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
-			       gfp_t gfp)
+int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
+			gfp_t gfp)
 {
 	unsigned int nr_pages = thp_nr_pages(page);
 	int ret;
@@ -6722,35 +6722,6 @@  static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
 	return ret;
 }
 
-/**
- * mem_cgroup_charge - charge a newly allocated page to a cgroup
- * @page: page to charge
- * @mm: mm context of the victim
- * @gfp_mask: reclaim mode
- *
- * Try to charge @page to the memcg that @mm belongs to, reclaiming
- * pages according to @gfp_mask if necessary. if @mm is NULL, try to
- * charge to the active memcg.
- *
- * Do not use this for pages allocated for swapin.
- *
- * Returns 0 on success. Otherwise, an error code is returned.
- */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
-{
-	struct mem_cgroup *memcg;
-	int ret;
-
-	if (mem_cgroup_disabled())
-		return 0;
-
-	memcg = get_mem_cgroup_from_mm(mm);
-	ret = __mem_cgroup_charge(page, memcg, gfp_mask);
-	css_put(&memcg->css);
-
-	return ret;
-}
-
 /**
  * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
  * @page: page to charge
@@ -6921,13 +6892,10 @@  static void uncharge_page(struct page *page, struct uncharge_gather *ug)
  *
  * Uncharge a page previously charged with mem_cgroup_charge().
  */
-void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct page *page)
 {
 	struct uncharge_gather ug;
 
-	if (mem_cgroup_disabled())
-		return;
-
 	/* Don't touch page->lru of any random page, pre-check: */
 	if (!page_memcg(page))
 		return;
@@ -6944,14 +6912,11 @@  void mem_cgroup_uncharge(struct page *page)
  * Uncharge a list of pages previously charged with
  * mem_cgroup_charge().
  */
-void mem_cgroup_uncharge_list(struct list_head *page_list)
+void __mem_cgroup_uncharge_list(struct list_head *page_list)
 {
 	struct uncharge_gather ug;
 	struct page *page;
 
-	if (mem_cgroup_disabled())
-		return;
-
 	uncharge_gather_clear(&ug);
 	list_for_each_entry(page, page_list, lru)
 		uncharge_page(page, &ug);