[v3,1/2] mm/memcg: align the memcg_data definition condition

Message ID 20240710054336.190410-1-alexs@kernel.org (mailing list archive)
State New
Series [v3,1/2] mm/memcg: align the memcg_data definition condition

Commit Message

alexs@kernel.org July 10, 2024, 5:43 a.m. UTC
From: "Alex Shi (Tencent)" <alexs@kernel.org>

Commit 21c690a349ba ("mm: introduce slabobj_ext to support slab object
extensions") changed the condition guarding folio/page->memcg_data from
MEMCG to SLAB_OBJ_EXT, and made MEMCG select SLAB_OBJ_EXT solely to keep
SLAB_MATCH(memcg_data, obj_exts) working, even though there is no other
relationship between the two options.

As a result, memcg_data is exposed and SLAB_OBJ_EXT is pulled in even
for !MEMCG builds. That is logically wrong and costs code size.

As Vlastimil Babka suggested, add a _unused_slab_obj_ext placeholder so
that SLAB_MATCH can still check slab.obj_exts when !MEMCG. That resolves
the match issue and cleans up the feature logic; the next patch then
decouples SLAB_OBJ_EXT from MEMCG.

Signed-off-by: Alex Shi (Tencent) <alexs@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yoann Congal <yoann.congal@smile.fr>
Cc: Masahiro Yamada <masahiroy@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
---
v1->v3: take Vlastimil's suggestion and move the SLAB_OBJ_EXT/MEMCG
decoupling to the 2nd patch.
---
 include/linux/mm_types.h | 8 ++++++--
 mm/slab.h                | 4 ++++
 2 files changed, 10 insertions(+), 2 deletions(-)
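
For readers not familiar with the constraint driving this patch: SLAB_MATCH
in mm/slab.h is a compile-time assertion that a struct slab field sits at the
same offset as the struct page field it overlays. That check is the reason
memcg_data cannot simply vanish for !MEMCG builds while SLAB_OBJ_EXT is
enabled; something at the same offset has to remain for SLAB_MATCH(..,
obj_exts) to name, hence the _unused_slab_obj_ext placeholder. A minimal,
self-contained sketch of the idea follows; page_like, slab_like and the file
name are illustrative stand-ins, not the real kernel definitions.

/* Build with: gcc -std=c11 slab_match_sketch.c */
#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins for struct page and struct slab. */
struct page_like {
	unsigned long flags;
	unsigned long compound_head;
	int _refcount;
	unsigned long memcg_data;	/* present when CONFIG_MEMCG */
};

struct slab_like {
	unsigned long __page_flags;
	void *slab_cache;
	int __page_refcount;
	unsigned long obj_exts;		/* present when CONFIG_SLAB_OBJ_EXT */
};

/* Same shape as the mm/slab.h helper: break the build on offset drift. */
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page_like, pg) ==			\
		      offsetof(struct slab_like, sl), "offset mismatch")

SLAB_MATCH(memcg_data, obj_exts);

int main(void)
{
	return 0;
}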

Comments

Vlastimil Babka July 11, 2024, 8:13 a.m. UTC | #1
On 7/10/24 7:43 AM, alexs@kernel.org wrote:
> From: "Alex Shi (Tencent)" <alexs@kernel.org>
> 
> Commit 21c690a349ba ("mm: introduce slabobj_ext to support slab object
> extensions") changed the condition guarding folio/page->memcg_data from
> MEMCG to SLAB_OBJ_EXT, and made MEMCG select SLAB_OBJ_EXT solely to keep
> SLAB_MATCH(memcg_data, obj_exts) working, even though there is no other
> relationship between the two options.
> 
> As a result, memcg_data is exposed and SLAB_OBJ_EXT is pulled in even
> for !MEMCG builds. That is logically wrong and costs code size.
> 
> As Vlastimil Babka suggested, add a _unused_slab_obj_ext placeholder so
> that SLAB_MATCH can still check slab.obj_exts when !MEMCG. That resolves
> the match issue and cleans up the feature logic; the next patch then
> decouples SLAB_OBJ_EXT from MEMCG.
> 
> Signed-off-by: Alex Shi (Tencent) <alexs@kernel.org>
> Cc: Randy Dunlap <rdunlap@infradead.org>
> Cc: Yoann Congal <yoann.congal@smile.fr>
> Cc: Masahiro Yamada <masahiroy@kernel.org>
> Cc: Petr Mladek <pmladek@suse.com>
> Cc: Suren Baghdasaryan <surenb@google.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> ---
> v1->v3: take Vlastimil's suggestion and move the SLAB_OBJ_EXT/MEMCG
> decoupling to the 2nd patch.
> ---
>  include/linux/mm_types.h | 8 ++++++--
>  mm/slab.h                | 4 ++++
>  2 files changed, 10 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index ef09c4eef6d3..4ac3abc673d3 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -180,8 +180,10 @@ struct page {
>  	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
>  	atomic_t _refcount;
>  
> -#ifdef CONFIG_SLAB_OBJ_EXT
> +#ifdef CONFIG_MEMCG
>  	unsigned long memcg_data;
> +#elif defined(CONFIG_SLAB_OBJ_EXT)
> +	unsigned long _unused_slab_obj_ext;
>  #endif
>  
>  	/*
> @@ -343,8 +345,10 @@ struct folio {
>  			};
>  			atomic_t _mapcount;
>  			atomic_t _refcount;
> -#ifdef CONFIG_SLAB_OBJ_EXT
> +#ifdef CONFIG_MEMCG
>  			unsigned long memcg_data;
> +#elif defined(CONFIG_SLAB_OBJ_EXT)
> +			unsigned long _unused_slab_obj_ext;
>  #endif
>  #if defined(WANT_PAGE_VIRTUAL)
>  			void *virtual;
> diff --git a/mm/slab.h b/mm/slab.h
> index 3586e6183224..8ffdd4f315f8 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -98,7 +98,11 @@ SLAB_MATCH(flags, __page_flags);
>  SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
>  SLAB_MATCH(_refcount, __page_refcount);
>  #ifdef CONFIG_SLAB_OBJ_EXT
> +#ifdef CONFIG_MEMCG
>  SLAB_MATCH(memcg_data, obj_exts);
> +#else
> +SLAB_MATCH(_unused_slab_obj_ext, obj_exts);
> +#endif
>  #endif

Why not also #ifdef / #elif like above, instead of this nesting?

>  #undef SLAB_MATCH
>  static_assert(sizeof(struct slab) <= sizeof(struct page));
Alex Shi July 11, 2024, 11:51 a.m. UTC | #2
On 7/11/24 4:13 PM, Vlastimil Babka wrote:
> On 7/10/24 7:43 AM, alexs@kernel.org wrote:
>> From: "Alex Shi (Tencent)" <alexs@kernel.org>
>>
>> Commit 21c690a349ba ("mm: introduce slabobj_ext to support slab object
>> extensions") changed the condition guarding folio/page->memcg_data from
>> MEMCG to SLAB_OBJ_EXT, and made MEMCG select SLAB_OBJ_EXT solely to keep
>> SLAB_MATCH(memcg_data, obj_exts) working, even though there is no other
>> relationship between the two options.
>>
>> As a result, memcg_data is exposed and SLAB_OBJ_EXT is pulled in even
>> for !MEMCG builds. That is logically wrong and costs code size.
>>
>> As Vlastimil Babka suggested, add a _unused_slab_obj_ext placeholder so
>> that SLAB_MATCH can still check slab.obj_exts when !MEMCG. That resolves
>> the match issue and cleans up the feature logic; the next patch then
>> decouples SLAB_OBJ_EXT from MEMCG.
>>
>> Signed-off-by: Alex Shi (Tencent) <alexs@kernel.org>
>> Cc: Randy Dunlap <rdunlap@infradead.org>
>> Cc: Yoann Congal <yoann.congal@smile.fr>
>> Cc: Masahiro Yamada <masahiroy@kernel.org>
>> Cc: Petr Mladek <pmladek@suse.com>
>> Cc: Suren Baghdasaryan <surenb@google.com>
>> Cc: Vlastimil Babka <vbabka@suse.cz>
>> ---
>> v1->v3: take Vlastimil's suggestion and move the SLAB_OBJ_EXT/MEMCG
>> decoupling to the 2nd patch.
>> ---
>>  include/linux/mm_types.h | 8 ++++++--
>>  mm/slab.h                | 4 ++++
>>  2 files changed, 10 insertions(+), 2 deletions(-)
>>
>> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
>> index ef09c4eef6d3..4ac3abc673d3 100644
>> --- a/include/linux/mm_types.h
>> +++ b/include/linux/mm_types.h
>> @@ -180,8 +180,10 @@ struct page {
>>  	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
>>  	atomic_t _refcount;
>>  
>> -#ifdef CONFIG_SLAB_OBJ_EXT
>> +#ifdef CONFIG_MEMCG
>>  	unsigned long memcg_data;
>> +#elif defined(CONFIG_SLAB_OBJ_EXT)
>> +	unsigned long _unused_slab_obj_ext;
>>  #endif
>>  
>>  	/*
>> @@ -343,8 +345,10 @@ struct folio {
>>  			};
>>  			atomic_t _mapcount;
>>  			atomic_t _refcount;
>> -#ifdef CONFIG_SLAB_OBJ_EXT
>> +#ifdef CONFIG_MEMCG
>>  			unsigned long memcg_data;
>> +#elif defined(CONFIG_SLAB_OBJ_EXT)
>> +			unsigned long _unused_slab_obj_ext;
>>  #endif
>>  #if defined(WANT_PAGE_VIRTUAL)
>>  			void *virtual;
>> diff --git a/mm/slab.h b/mm/slab.h
>> index 3586e6183224..8ffdd4f315f8 100644
>> --- a/mm/slab.h
>> +++ b/mm/slab.h
>> @@ -98,7 +98,11 @@ SLAB_MATCH(flags, __page_flags);
>>  SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
>>  SLAB_MATCH(_refcount, __page_refcount);
>>  #ifdef CONFIG_SLAB_OBJ_EXT
>> +#ifdef CONFIG_MEMCG
>>  SLAB_MATCH(memcg_data, obj_exts);
>> +#else
>> +SLAB_MATCH(_unused_slab_obj_ext, obj_exts);
>> +#endif
>>  #endif
> 
> Why not also #ifdef / #elif like above, instead of this nesting?

Uh, it works too once MEMCG/SLAB_OBJ_EXT are decoupled,
but right, it could be written with #ifdef/#elif.

Thanks
Alex
> 
>>  #undef SLAB_MATCH
>>  static_assert(sizeof(struct slab) <= sizeof(struct page));
>
Suren Baghdasaryan July 11, 2024, 2:55 p.m. UTC | #3
On Thu, Jul 11, 2024 at 4:51 AM Alex Shi <seakeel@gmail.com> wrote:
>
>
>
> On 7/11/24 4:13 PM, Vlastimil Babka wrote:
> > On 7/10/24 7:43 AM, alexs@kernel.org wrote:
> >> From: "Alex Shi (Tencent)" <alexs@kernel.org>
> >>
> >> Commit 21c690a349ba ("mm: introduce slabobj_ext to support slab object
> >> extensions") changed the condition guarding folio/page->memcg_data from
> >> MEMCG to SLAB_OBJ_EXT, and made MEMCG select SLAB_OBJ_EXT solely to keep
> >> SLAB_MATCH(memcg_data, obj_exts) working, even though there is no other
> >> relationship between the two options.
> >>
> >> As a result, memcg_data is exposed and SLAB_OBJ_EXT is pulled in even
> >> for !MEMCG builds. That is logically wrong and costs code size.
> >>
> >> As Vlastimil Babka suggested, add a _unused_slab_obj_ext placeholder so
> >> that SLAB_MATCH can still check slab.obj_exts when !MEMCG. That resolves
> >> the match issue and cleans up the feature logic; the next patch then
> >> decouples SLAB_OBJ_EXT from MEMCG.
> >>
> >> Signed-off-by: Alex Shi (Tencent) <alexs@kernel.org>
> >> Cc: Randy Dunlap <rdunlap@infradead.org>
> >> Cc: Yoann Congal <yoann.congal@smile.fr>
> >> Cc: Masahiro Yamada <masahiroy@kernel.org>
> >> Cc: Petr Mladek <pmladek@suse.com>
> >> Cc: Suren Baghdasaryan <surenb@google.com>
> >> Cc: Vlastimil Babka <vbabka@suse.cz>
> >> ---
> >> v1->v3: take Vlastimil's suggestion and move the SLAB_OBJ_EXT/MEMCG
> >> decoupling to the 2nd patch.
> >> ---
> >>  include/linux/mm_types.h | 8 ++++++--
> >>  mm/slab.h                | 4 ++++
> >>  2 files changed, 10 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> >> index ef09c4eef6d3..4ac3abc673d3 100644
> >> --- a/include/linux/mm_types.h
> >> +++ b/include/linux/mm_types.h
> >> @@ -180,8 +180,10 @@ struct page {
> >>      /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
> >>      atomic_t _refcount;
> >>
> >> -#ifdef CONFIG_SLAB_OBJ_EXT
> >> +#ifdef CONFIG_MEMCG
> >>      unsigned long memcg_data;
> >> +#elif defined(CONFIG_SLAB_OBJ_EXT)
> >> +    unsigned long _unused_slab_obj_ext;
> >>  #endif
> >>
> >>      /*
> >> @@ -343,8 +345,10 @@ struct folio {
> >>                      };
> >>                      atomic_t _mapcount;
> >>                      atomic_t _refcount;
> >> -#ifdef CONFIG_SLAB_OBJ_EXT
> >> +#ifdef CONFIG_MEMCG
> >>                      unsigned long memcg_data;
> >> +#elif defined(CONFIG_SLAB_OBJ_EXT)
> >> +                    unsigned long _unused_slab_obj_ext;
> >>  #endif
> >>  #if defined(WANT_PAGE_VIRTUAL)
> >>                      void *virtual;
> >> diff --git a/mm/slab.h b/mm/slab.h
> >> index 3586e6183224..8ffdd4f315f8 100644
> >> --- a/mm/slab.h
> >> +++ b/mm/slab.h
> >> @@ -98,7 +98,11 @@ SLAB_MATCH(flags, __page_flags);
> >>  SLAB_MATCH(compound_head, slab_cache);      /* Ensure bit 0 is clear */
> >>  SLAB_MATCH(_refcount, __page_refcount);
> >>  #ifdef CONFIG_SLAB_OBJ_EXT
> >> +#ifdef CONFIG_MEMCG
> >>  SLAB_MATCH(memcg_data, obj_exts);
> >> +#else
> >> +SLAB_MATCH(_unused_slab_obj_ext, obj_exts);
> >> +#endif
> >>  #endif
> >
> > Why not also #ifdef / #elif like above, instead of this nesting?
>
> Uh, it works too once MEMCG/SLAB_OBJ_EXT are decoupled,
> but right, it could be written with #ifdef/#elif.

Yes, please keep the same condition, otherwise it gets confusing.

>
> Thanks
> Alex
> >
> >>  #undef SLAB_MATCH
> >>  static_assert(sizeof(struct slab) <= sizeof(struct page));
> >
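
For reference, the flattening Vlastimil asks about and Suren seconds would
make the mm/slab.h guard mirror the mm_types.h one directly. A sketch of how
that hunk could read (this is the suggested shape, not the posted patch):

#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_ext, obj_exts);
#endif

This form assumes that whenever CONFIG_MEMCG is set, struct slab provides an
obj_exts field, i.e. that MEMCG implies SLAB_OBJ_EXT one way or another,
which is exactly the coupling the thread is discussing.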

Patch

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ef09c4eef6d3..4ac3abc673d3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -180,8 +180,10 @@  struct page {
 	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
 	atomic_t _refcount;
 
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+	unsigned long _unused_slab_obj_ext;
 #endif
 
 	/*
@@ -343,8 +345,10 @@  struct folio {
 			};
 			atomic_t _mapcount;
 			atomic_t _refcount;
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
 			unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+			unsigned long _unused_slab_obj_ext;
 #endif
 #if defined(WANT_PAGE_VIRTUAL)
 			void *virtual;
diff --git a/mm/slab.h b/mm/slab.h
index 3586e6183224..8ffdd4f315f8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -98,7 +98,11 @@  SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, obj_exts);
+#else
+SLAB_MATCH(_unused_slab_obj_ext, obj_exts);
+#endif
 #endif
 #undef SLAB_MATCH
 static_assert(sizeof(struct slab) <= sizeof(struct page));
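
A quick way to sanity-check the code-size claim in the commit message, and to
confirm that struct page layout only changes where intended, is to compare
layouts and image sizes before and after the series. A hedged sketch, with
vmlinux.old/vmlinux.new as placeholder build outputs (debug info required for
pahole):

$ pahole -C page vmlinux.old > page.old
$ pahole -C page vmlinux.new > page.new
$ diff -u page.old page.new                       # memcg_data should only go away in !MEMCG configs
$ ./scripts/bloat-o-meter vmlinux.old vmlinux.new # expect a small size win for !MEMCG builds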