
[v8,4/8] mm: Provide a new count_objcg_events() API for batch event updates.

Message ID: 20240928021620.8369-5-kanchana.p.sridhar@intel.com
State: New
Series: mm: zswap swap-out of large folios

Commit Message

Sridhar, Kanchana P Sept. 28, 2024, 2:16 a.m. UTC
With the introduction of zswap_store() swapping out large folios,
we need to efficiently update the objcg's memcg events once per
successfully stored folio. For instance, the 'ZSWPOUT' event needs
to be incremented by folio_nr_pages().

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
 include/linux/memcontrol.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
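
For illustration only, a minimal sketch of how a zswap_store() call site could use the new helper to account a whole large folio in one event update (this call site is an assumption for context, not part of this patch):

	/*
	 * Illustrative caller, not part of this patch: after a large folio
	 * has been stored successfully, account all of its subpages with a
	 * single event update instead of a per-page loop.
	 */
	if (objcg)
		count_objcg_events(objcg, ZSWPOUT, folio_nr_pages(folio));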

Comments

Yosry Ahmed Sept. 28, 2024, 3:02 a.m. UTC | #1
On Fri, Sep 27, 2024 at 7:16 PM Kanchana P Sridhar
<kanchana.p.sridhar@intel.com> wrote:
>
> With the introduction of zswap_store() swapping out large folios,
> we need to efficiently update the objcg's memcg events once per
> successfully stored folio. For instance, the 'ZSWPOUT' event needs
> to be incremented by folio_nr_pages().
>
> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
> ---
>  include/linux/memcontrol.h | 20 ++++++++++++++++++++
>  1 file changed, 20 insertions(+)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 15c2716f9aa3..f47fd00c5eea 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1778,6 +1778,21 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
>         rcu_read_unlock();
>  }
>
> +static inline void count_objcg_events(struct obj_cgroup *objcg,
> +                                     enum vm_event_item idx,
> +                                     unsigned long count)
> +{
> +       struct mem_cgroup *memcg;
> +
> +       if (!memcg_kmem_online())
> +               return;
> +
> +       rcu_read_lock();
> +       memcg = obj_cgroup_memcg(objcg);
> +       count_memcg_events(memcg, idx, count);
> +       rcu_read_unlock();
> +}

Instead of replicating the code in count_objcg_event(), we should
change count_objcg_event() to become count_objcg_events() (i.e. add a
count parameter). The existing callers can pass in 1, there's only 3
of them anyway (2 after patch 6), and they are all in zswap.
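
A rough sketch of the consolidated helper this suggestion implies (naming follows the proposal; the final v9 code and the caller shown are assumptions, not this patch):

static inline void count_objcg_events(struct obj_cgroup *objcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, count);
	rcu_read_unlock();
}

/* Existing single-event callers in zswap would then pass a count of 1: */
count_objcg_events(entry->objcg, ZSWPIN, 1);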

> +
>  #else
>  static inline bool mem_cgroup_kmem_disabled(void)
>  {
> @@ -1834,6 +1849,11 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
>  {
>  }
>
> +static inline void count_objcg_events(struct obj_cgroup *objcg,
> +                                     enum vm_event_item idx,
> +                                     unsigned long count)
> +{
> +}
>  #endif /* CONFIG_MEMCG */
>
>  #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
> --
> 2.27.0
>
Chengming Zhou Sept. 28, 2024, 5:46 a.m. UTC | #2
On 2024/9/28 11:02, Yosry Ahmed wrote:
> On Fri, Sep 27, 2024 at 7:16 PM Kanchana P Sridhar
> <kanchana.p.sridhar@intel.com> wrote:
>>
>> With the introduction of zswap_store() swapping out large folios,
>> we need to efficiently update the objcg's memcg events once per
>> successfully stored folio. For instance, the 'ZSWPOUT' event needs
>> to be incremented by folio_nr_pages().
>>
>> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
>> ---
>>   include/linux/memcontrol.h | 20 ++++++++++++++++++++
>>   1 file changed, 20 insertions(+)
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index 15c2716f9aa3..f47fd00c5eea 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -1778,6 +1778,21 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
>>          rcu_read_unlock();
>>   }
>>
>> +static inline void count_objcg_events(struct obj_cgroup *objcg,
>> +                                     enum vm_event_item idx,
>> +                                     unsigned long count)
>> +{
>> +       struct mem_cgroup *memcg;
>> +
>> +       if (!memcg_kmem_online())
>> +               return;
>> +
>> +       rcu_read_lock();
>> +       memcg = obj_cgroup_memcg(objcg);
>> +       count_memcg_events(memcg, idx, count);
>> +       rcu_read_unlock();
>> +}
> 
> Instead of replicating the code in count_objcg_event(), we should
> change count_objcg_event() to become count_objcg_events() (i.e. add a
> count parameter). The existing callers can pass in 1, there's only 3
> of them anyway (2 after patch 6), and they are all in zswap.

Right, agree.

> 
>> +
>>   #else
>>   static inline bool mem_cgroup_kmem_disabled(void)
>>   {
>> @@ -1834,6 +1849,11 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
>>   {
>>   }
>>
>> +static inline void count_objcg_events(struct obj_cgroup *objcg,
>> +                                     enum vm_event_item idx,
>> +                                     unsigned long count)
>> +{
>> +}
>>   #endif /* CONFIG_MEMCG */
>>
>>   #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
>> --
>> 2.27.0
>>
Sridhar, Kanchana P Sept. 29, 2024, 9 p.m. UTC | #3
> -----Original Message-----
> From: Yosry Ahmed <yosryahmed@google.com>
> Sent: Friday, September 27, 2024 8:02 PM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@intel.com>
> Cc: linux-kernel@vger.kernel.org; linux-mm@kvack.org;
> hannes@cmpxchg.org; nphamcs@gmail.com; chengming.zhou@linux.dev;
> usamaarif642@gmail.com; shakeel.butt@linux.dev; ryan.roberts@arm.com;
> Huang, Ying <ying.huang@intel.com>; 21cnbao@gmail.com; akpm@linux-
> foundation.org; Zou, Nanhai <nanhai.zou@intel.com>; Feghali, Wajdi K
> <wajdi.k.feghali@intel.com>; Gopal, Vinodh <vinodh.gopal@intel.com>
> Subject: Re: [PATCH v8 4/8] mm: Provide a new count_objcg_events() API for
> batch event updates.
> 
> On Fri, Sep 27, 2024 at 7:16 PM Kanchana P Sridhar
> <kanchana.p.sridhar@intel.com> wrote:
> >
> > With the introduction of zswap_store() swapping out large folios,
> > we need to efficiently update the objcg's memcg events once per
> > successfully stored folio. For instance, the 'ZSWPOUT' event needs
> > to be incremented by folio_nr_pages().
> >
> > Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
> > ---
> >  include/linux/memcontrol.h | 20 ++++++++++++++++++++
> >  1 file changed, 20 insertions(+)
> >
> > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> > index 15c2716f9aa3..f47fd00c5eea 100644
> > --- a/include/linux/memcontrol.h
> > +++ b/include/linux/memcontrol.h
> > @@ -1778,6 +1778,21 @@ static inline void count_objcg_event(struct
> obj_cgroup *objcg,
> >         rcu_read_unlock();
> >  }
> >
> > +static inline void count_objcg_events(struct obj_cgroup *objcg,
> > +                                     enum vm_event_item idx,
> > +                                     unsigned long count)
> > +{
> > +       struct mem_cgroup *memcg;
> > +
> > +       if (!memcg_kmem_online())
> > +               return;
> > +
> > +       rcu_read_lock();
> > +       memcg = obj_cgroup_memcg(objcg);
> > +       count_memcg_events(memcg, idx, count);
> > +       rcu_read_unlock();
> > +}
> 
> Instead of replicating the code in count_objcg_event(), we should
> change count_objcg_event() to become count_objcg_events() (i.e. add a
> count parameter). The existing callers can pass in 1, there's only 3
> of them anyway (2 after patch 6), and they are all in zswap.

Thanks Yosry. This makes sense. I will incorporate this in v9.

Thanks,
Kanchana

> 
> > +
> >  #else
> >  static inline bool mem_cgroup_kmem_disabled(void)
> >  {
> > @@ -1834,6 +1849,11 @@ static inline void count_objcg_event(struct
> obj_cgroup *objcg,
> >  {
> >  }
> >
> > +static inline void count_objcg_events(struct obj_cgroup *objcg,
> > +                                     enum vm_event_item idx,
> > +                                     unsigned long count)
> > +{
> > +}
> >  #endif /* CONFIG_MEMCG */
> >
> >  #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
> > --
> > 2.27.0
> >

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 15c2716f9aa3..f47fd00c5eea 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1778,6 +1778,21 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
 	rcu_read_unlock();
 }
 
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+				      enum vm_event_item idx,
+				      unsigned long count)
+{
+	struct mem_cgroup *memcg;
+
+	if (!memcg_kmem_online())
+		return;
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	count_memcg_events(memcg, idx, count);
+	rcu_read_unlock();
+}
+
 #else
 static inline bool mem_cgroup_kmem_disabled(void)
 {
@@ -1834,6 +1849,11 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
 {
 }
 
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+				      enum vm_event_item idx,
+				      unsigned long count)
+{
+}
 #endif /* CONFIG_MEMCG */
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)