
[v1,6/9] mm: memcg: put memcg1-specific struct mem_cgroup's members under CONFIG_MEMCG_V1

Message ID 20240628210317.272856-7-roman.gushchin@linux.dev (mailing list archive)
State New
Series mm: memcg: put cgroup v1-specific memcg data under CONFIG_MEMCG_V1

Commit Message

Roman Gushchin June 28, 2024, 9:03 p.m. UTC
Put memcg1-specific members of struct mem_cgroup under the
CONFIG_MEMCG_V1 config option. Also group them close to the end
of struct mem_cgroup just before the dynamic per-node part.

Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
---
 include/linux/memcontrol.h | 103 +++++++++++++++++++------------------
 1 file changed, 53 insertions(+), 50 deletions(-)
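
For readers skimming the diff: once these members live under
CONFIG_MEMCG_V1, any code that touches them must itself be compiled
only when that option is set. A minimal sketch of the resulting
pattern (the helper name below is hypothetical, not part of this
patch):

#ifdef CONFIG_MEMCG_V1
/* Hypothetical helper: it can only exist under CONFIG_MEMCG_V1,
 * because soft_limit is a v1-only member after this patch. */
static bool memcg1_over_soft_limit(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory) > memcg->soft_limit;
}
#endif /* CONFIG_MEMCG_V1 */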

Comments

Shakeel Butt June 29, 2024, 12:48 a.m. UTC | #1
On Fri, Jun 28, 2024 at 09:03:14PM GMT, Roman Gushchin wrote:
> Put memcg1-specific members of struct mem_cgroup under the
> CONFIG_MEMCG_V1 config option. Also group them close to the end
> of struct mem_cgroup just before the dynamic per-node part.
> 
> Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
> ---
>  include/linux/memcontrol.h | 103 +++++++++++++++++++------------------
>  1 file changed, 53 insertions(+), 50 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 44ab6394c9ed..107b0c5d6eab 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -188,10 +188,6 @@ struct mem_cgroup {
>  		struct page_counter memsw;	/* v1 only */
>  	};
>  
> -	/* Legacy consumer-oriented counters */
> -	struct page_counter kmem;		/* v1 only */
> -	struct page_counter tcpmem;		/* v1 only */
> -
>  	/* Range enforcement for interrupt charges */
>  	struct work_struct high_work;
>  
> @@ -205,8 +201,6 @@ struct mem_cgroup {
>  	bool zswap_writeback;
>  #endif
>  
> -	unsigned long soft_limit;
> -
>  	/* vmpressure notifications */
>  	struct vmpressure vmpressure;
>  
> @@ -215,13 +209,7 @@ struct mem_cgroup {
>  	 */
>  	bool oom_group;
>  
> -	/* protected by memcg_oom_lock */
> -	bool		oom_lock;
> -	int		under_oom;
> -
> -	int	swappiness;
> -	/* OOM-Killer disable */
> -	int		oom_kill_disable;
> +	int swappiness;
>  
>  	/* memory.events and memory.events.local */
>  	struct cgroup_file events_file;
> @@ -230,27 +218,6 @@ struct mem_cgroup {
>  	/* handle for "memory.swap.events" */
>  	struct cgroup_file swap_events_file;
>  
> -	/* protect arrays of thresholds */
> -	struct mutex thresholds_lock;
> -
> -	/* thresholds for memory usage. RCU-protected */
> -	struct mem_cgroup_thresholds thresholds;
> -
> -	/* thresholds for mem+swap usage. RCU-protected */
> -	struct mem_cgroup_thresholds memsw_thresholds;
> -
> -	/* For oom notifier event fd */
> -	struct list_head oom_notify;
> -
> -	/*
> -	 * Should we move charges of a task when a task is moved into this
> -	 * mem_cgroup ? And what type of charges should we move ?
> -	 */
> -	unsigned long move_charge_at_immigrate;
> -	/* taken only while moving_account > 0 */
> -	spinlock_t		move_lock;
> -	unsigned long		move_lock_flags;
> -
>  	CACHELINE_PADDING(_pad1_);

Let's also remove _pad1_ and _pad2_, as this rearrangement nullifies
the reasons behind these paddings. We need to run some perf benchmarks
to identify any new false cache sharing between fields.
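
For context, CACHELINE_PADDING is roughly the following zero-size,
cacheline-aligned struct (see include/linux/cache.h, SMP case); the
paddings were placed to keep write-hot fields off the cache lines of
read-mostly ones. The example struct below is illustrative only, not
kernel code:

struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)	struct cacheline_padding name

/* Illustrative example of the false-sharing concern: without the
 * padding, 'limit' and 'usage' could share a cache line, so writes
 * to 'usage' would keep invalidating readers of 'limit'. */
struct example {
	unsigned long	limit;		/* read-mostly */
	CACHELINE_PADDING(_pad_);
	atomic_long_t	usage;		/* written from many CPUs */
};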
Shakeel Butt July 3, 2024, 3:12 p.m. UTC | #2
On Fri, Jun 28, 2024 at 09:03:14PM GMT, Roman Gushchin wrote:
> Put memcg1-specific members of struct mem_cgroup under the
> CONFIG_MEMCG_V1 config option. Also group them close to the end
> of struct mem_cgroup just before the dynamic per-node part.
> 
> Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>

Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Andrew Morton July 4, 2024, 11:35 p.m. UTC | #3
On Fri, 28 Jun 2024 17:48:54 -0700 Shakeel Butt <shakeel.butt@linux.dev> wrote:

> > -	/* For oom notifier event fd */
> > -	struct list_head oom_notify;
> > -
> > -	/*
> > -	 * Should we move charges of a task when a task is moved into this
> > -	 * mem_cgroup ? And what type of charges should we move ?
> > -	 */
> > -	unsigned long move_charge_at_immigrate;
> > -	/* taken only while moving_account > 0 */
> > -	spinlock_t		move_lock;
> > -	unsigned long		move_lock_flags;
> > -
> >  	CACHELINE_PADDING(_pad1_);
> 
> Let's also remove _pad1_ and _pad2_, as this rearrangement nullifies
> the reasons behind these paddings. We need to run some perf benchmarks
> to identify any new false cache sharing between fields.

I guess this is going to be a followup patch (please).
Shakeel Butt July 5, 2024, 3:43 a.m. UTC | #4
On Thu, Jul 4, 2024 at 4:35 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Fri, 28 Jun 2024 17:48:54 -0700 Shakeel Butt <shakeel.butt@linux.dev> wrote:
>
> > > -   /* For oom notifier event fd */
> > > -   struct list_head oom_notify;
> > > -
> > > -   /*
> > > -    * Should we move charges of a task when a task is moved into this
> > > -    * mem_cgroup ? And what type of charges should we move ?
> > > -    */
> > > -   unsigned long move_charge_at_immigrate;
> > > -   /* taken only while moving_account > 0 */
> > > -   spinlock_t              move_lock;
> > > -   unsigned long           move_lock_flags;
> > > -
> > >     CACHELINE_PADDING(_pad1_);
> >
> > Let's also remove _pad1_ and _pad2_, as this rearrangement nullifies
> > the reasons behind these paddings. We need to run some perf benchmarks
> > to identify any new false cache sharing between fields.
>
> I guess this is going to be a followup patch (please).

Already posted [1] and it has been picked up.

[1] https://lore.kernel.org/linux-mm/20240701185932.704807-1-roman.gushchin@linux.dev/

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44ab6394c9ed..107b0c5d6eab 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -188,10 +188,6 @@ struct mem_cgroup {
 		struct page_counter memsw;	/* v1 only */
 	};
 
-	/* Legacy consumer-oriented counters */
-	struct page_counter kmem;		/* v1 only */
-	struct page_counter tcpmem;		/* v1 only */
-
 	/* Range enforcement for interrupt charges */
 	struct work_struct high_work;
 
@@ -205,8 +201,6 @@ struct mem_cgroup {
 	bool zswap_writeback;
 #endif
 
-	unsigned long soft_limit;
-
 	/* vmpressure notifications */
 	struct vmpressure vmpressure;
 
@@ -215,13 +209,7 @@ struct mem_cgroup {
 	 */
 	bool oom_group;
 
-	/* protected by memcg_oom_lock */
-	bool		oom_lock;
-	int		under_oom;
-
-	int	swappiness;
-	/* OOM-Killer disable */
-	int		oom_kill_disable;
+	int swappiness;
 
 	/* memory.events and memory.events.local */
 	struct cgroup_file events_file;
@@ -230,27 +218,6 @@ struct mem_cgroup {
 	/* handle for "memory.swap.events" */
 	struct cgroup_file swap_events_file;
 
-	/* protect arrays of thresholds */
-	struct mutex thresholds_lock;
-
-	/* thresholds for memory usage. RCU-protected */
-	struct mem_cgroup_thresholds thresholds;
-
-	/* thresholds for mem+swap usage. RCU-protected */
-	struct mem_cgroup_thresholds memsw_thresholds;
-
-	/* For oom notifier event fd */
-	struct list_head oom_notify;
-
-	/*
-	 * Should we move charges of a task when a task is moved into this
-	 * mem_cgroup ? And what type of charges should we move ?
-	 */
-	unsigned long move_charge_at_immigrate;
-	/* taken only while moving_account > 0 */
-	spinlock_t		move_lock;
-	unsigned long		move_lock_flags;
-
 	CACHELINE_PADDING(_pad1_);
 
 	/* memory.stat */
@@ -267,10 +234,6 @@ struct mem_cgroup {
 	 */
 	unsigned long		socket_pressure;
 
-	/* Legacy tcp memory accounting */
-	bool			tcpmem_active;
-	int			tcpmem_pressure;
-
 #ifdef CONFIG_MEMCG_KMEM
 	int kmemcg_id;
 	/*
@@ -284,14 +247,6 @@ struct mem_cgroup {
 	struct list_head objcg_list;
 #endif
 
-	CACHELINE_PADDING(_pad2_);
-
-	/*
-	 * set > 0 if pages under this cgroup are moving to other cgroup.
-	 */
-	atomic_t		moving_account;
-	struct task_struct	*move_lock_task;
-
 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -300,10 +255,6 @@ struct mem_cgroup {
 	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
 #endif
 
-	/* List of events which userspace want to receive */
-	struct list_head event_list;
-	spinlock_t event_list_lock;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	struct deferred_split deferred_split_queue;
 #endif
@@ -313,6 +264,58 @@ struct mem_cgroup {
 	struct lru_gen_mm_list mm_list;
 #endif
 
+#ifdef CONFIG_MEMCG_V1
+	/* Legacy consumer-oriented counters */
+	struct page_counter kmem;		/* v1 only */
+	struct page_counter tcpmem;		/* v1 only */
+
+	unsigned long soft_limit;
+
+	/* protected by memcg_oom_lock */
+	bool oom_lock;
+	int under_oom;
+
+	/* OOM-Killer disable */
+	int oom_kill_disable;
+
+	/* protect arrays of thresholds */
+	struct mutex thresholds_lock;
+
+	/* thresholds for memory usage. RCU-protected */
+	struct mem_cgroup_thresholds thresholds;
+
+	/* thresholds for mem+swap usage. RCU-protected */
+	struct mem_cgroup_thresholds memsw_thresholds;
+
+	/* For oom notifier event fd */
+	struct list_head oom_notify;
+
+	/*
+	 * Should we move charges of a task when a task is moved into this
+	 * mem_cgroup ? And what type of charges should we move ?
+	 */
+	unsigned long move_charge_at_immigrate;
+	/* taken only while moving_account > 0 */
+	spinlock_t move_lock;
+	unsigned long move_lock_flags;
+
+	/* Legacy tcp memory accounting */
+	bool tcpmem_active;
+	int tcpmem_pressure;
+
+	CACHELINE_PADDING(_pad2_);
+
+	/*
+	 * set > 0 if pages under this cgroup are moving to other cgroup.
+	 */
+	atomic_t moving_account;
+	struct task_struct *move_lock_task;
+
+	/* List of events which userspace want to receive */
+	struct list_head event_list;
+	spinlock_t event_list_lock;
+#endif /* CONFIG_MEMCG_V1 */
+
 	struct mem_cgroup_per_node *nodeinfo[];
 };
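
The relocated v1 block sits just above the flexible nodeinfo[] array,
which is why it is grouped at the end: the per-node part is sized at
allocation time. Roughly the shape of the allocation (a sketch based
on mm/memcontrol.c, not a verbatim quote):

	struct mem_cgroup *memcg;

	/* nodeinfo[] is a flexible array member, so the object size
	 * depends on nr_node_ids; struct_size() does the overflow-safe
	 * size math. */
	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids),
			GFP_KERNEL);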