
[v5,4/4] mm/slub: extend redzone check to extra allocated kmalloc space than requested

Message ID: 20220907071023.3838692-5-feng.tang@intel.com (mailing list archive)
State: New
Series: mm/slub: some debug enhancements for kmalloc

Commit Message

Feng Tang Sept. 7, 2022, 7:10 a.m. UTC
kmalloc rounds up the request size to a fixed size (mostly a power
of 2), so there can be extra space beyond what was requested, whose
size is the actual buffer size minus the original request size.

To better detect out-of-bounds access or abuse of this space, add a
redzone sanity check for it.

In the current kernel, some kmalloc users already know this space
exists and use it after calling ksize() to learn the real size of
the allocated buffer. So skip the sanity check for objects on which
ksize() has been called, treating them as legitimate users.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Feng Tang <feng.tang@intel.com>
---
 mm/slab.h        |  4 ++++
 mm/slab_common.c |  4 ++++
 mm/slub.c        | 57 +++++++++++++++++++++++++++++++++++++++++++++---
 3 files changed, 62 insertions(+), 3 deletions(-)
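
For illustration, a minimal sketch (not part of the patch or the thread) of
the case the series targets, assuming SLUB debug redzoning plus the
orig_size tracking from the earlier patches is active for the kmalloc
caches (e.g. via something like slub_debug=ZU); sizes and the exact
reporting point are assumptions:

	/*
	 * Sketch only -- error handling omitted, sizes illustrative.
	 * A 52-byte request is served from the kmalloc-64 cache, so bytes
	 * 52..63 are extra space the caller never asked for; this patch
	 * fills them with the redzone pattern and checks them.
	 */
	u8 *p = kmalloc(52, GFP_KERNEL);

	memset(p, 0, 52);	/* within the requested size: fine */
	p[60] = 0xff;		/* past the request: would show up as a
				 * "kmalloc Redzone" report when the object
				 * is next checked, e.g. on kfree() */
	kfree(p);

	/*
	 * A caller that deliberately uses the rounded-up space first asks
	 * ksize(); __ksize() then calls skip_orig_size_check(), so the
	 * object is treated as a legitimate user and not reported.
	 */
	u8 *q = kmalloc(52, GFP_KERNEL);
	size_t real = ksize(q);		/* 64 for this cache */

	memset(q, 0xaa, real);		/* uses the full buffer: no report */
	kfree(q);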

Comments

Hyeonggon Yoo Sept. 9, 2022, 6:26 a.m. UTC | #1
On Wed, Sep 07, 2022 at 03:10:23PM +0800, Feng Tang wrote:
> kmalloc will round up the request size to a fixed size (mostly power
> of 2), so there could be a extra space than what is requested, whose
> size is the actual buffer size minus original request size.
> 
> To better detect out of bound access or abuse of this space, add
> redzone sanity check for it.
> 
> And in current kernel, some kmalloc user already knows the existence
> of the space and utilizes it after calling 'ksize()' to know the real
> size of the allocated buffer. So we skip the sanity check for objects
> which have been called with ksize(), as treating them as legitimate
> users.
> 
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Feng Tang <feng.tang@intel.com>
> ---
>  mm/slab.h        |  4 ++++
>  mm/slab_common.c |  4 ++++
>  mm/slub.c        | 57 +++++++++++++++++++++++++++++++++++++++++++++---
>  3 files changed, 62 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 20f9e2a9814f..0bc91b30b031 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -885,4 +885,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
>  }
>  #endif
>  
> +#ifdef CONFIG_SLUB_DEBUG
> +void skip_orig_size_check(struct kmem_cache *s, const void *object);
> +#endif
> +
>  #endif /* MM_SLAB_H */
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8e13e3aac53f..5106667d6adb 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1001,6 +1001,10 @@ size_t __ksize(const void *object)
>  		return folio_size(folio);
>  	}
>  
> +#ifdef CONFIG_SLUB_DEBUG
> +	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
> +#endif
> +
>  	return slab_ksize(folio_slab(folio)->slab_cache);
>  }
>  
> diff --git a/mm/slub.c b/mm/slub.c
> index f523601d3fcf..2f0302136604 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -812,12 +812,27 @@ static inline void set_orig_size(struct kmem_cache *s,
>  	if (!slub_debug_orig_size(s))
>  		return;
>  
> +#ifdef CONFIG_KASAN_GENERIC
> +	/*
> +	 * KASAN could save its free meta data in the start part of object
> +	 * area, so skip the redzone check if kasan's meta data size is
> +	 * bigger enough to possibly overlap with kmalloc redzone
> +	 */
> +	if (s->kasan_info.free_meta_size_in_object * 2 >= s->object_size)
> +		orig_size = s->object_size;
> +#endif
> +
>  	p += get_info_end(s);
>  	p += sizeof(struct track) * 2;
>  
>  	*(unsigned int *)p = orig_size;
>  }
>  
> +void skip_orig_size_check(struct kmem_cache *s, const void *object)
> +{
> +	set_orig_size(s, (void *)object, s->object_size);
> +}
> +
>  static unsigned int get_orig_size(struct kmem_cache *s, void *object)
>  {
>  	void *p = kasan_reset_tag(object);
> @@ -949,13 +964,34 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
>  static void init_object(struct kmem_cache *s, void *object, u8 val)
>  {
>  	u8 *p = kasan_reset_tag(object);
> +	unsigned int orig_size = s->object_size;
>  
> -	if (s->flags & SLAB_RED_ZONE)
> +	if (s->flags & SLAB_RED_ZONE) {
>  		memset(p - s->red_left_pad, val, s->red_left_pad);
>  
> +		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> +			unsigned int zone_start;
> +
> +			orig_size = get_orig_size(s, object);
> +			zone_start = orig_size;
> +
> +			if (!freeptr_outside_object(s))
> +				zone_start = max_t(unsigned int, orig_size,
> +						s->offset + sizeof(void *));
> +
> +			/*
> +			 * Redzone the extra allocated space by kmalloc
> +			 * than requested.
> +			 */
> +			if (zone_start < s->object_size)
> +				memset(p + zone_start, val,
> +					s->object_size - zone_start);
> +		}
> +	}
> +
>  	if (s->flags & __OBJECT_POISON) {
> -		memset(p, POISON_FREE, s->object_size - 1);
> -		p[s->object_size - 1] = POISON_END;
> +		memset(p, POISON_FREE, orig_size - 1);
> +		p[orig_size - 1] = POISON_END;
>  	}
>  
>  	if (s->flags & SLAB_RED_ZONE)
> @@ -1103,6 +1139,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
>  {
>  	u8 *p = object;
>  	u8 *endobject = object + s->object_size;
> +	unsigned int orig_size;
>  
>  	if (s->flags & SLAB_RED_ZONE) {
>  		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
> @@ -1112,6 +1149,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
>  		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
>  			endobject, val, s->inuse - s->object_size))
>  			return 0;
> +
> +		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> +			orig_size = get_orig_size(s, object);
> +
> +			if (!freeptr_outside_object(s))
> +				orig_size = max_t(unsigned int, orig_size,
> +						s->offset + sizeof(void *));
> +			if (s->object_size > orig_size  &&
> +				!check_bytes_and_report(s, slab, object,
> +					"kmalloc Redzone", p + orig_size,
> +					val, s->object_size - orig_size)) {
> +				return 0;
> +			}
> +		}
>  	} else {
>  		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
>  			check_bytes_and_report(s, slab, p, "Alignment padding",
> -- 
> 2.34.1
> 

Looks good, but what about putting the free pointer outside the object
when slub_debug_orig_size(s)?

diff --git a/mm/slub.c b/mm/slub.c
index 9d1a985c9ede..7e57d9f718d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -970,22 +970,15 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 		memset(p - s->red_left_pad, val, s->red_left_pad);
 
 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
-			unsigned int zone_start;
-
 			orig_size = get_orig_size(s, object);
-			zone_start = orig_size;
-
-			if (!freeptr_outside_object(s))
-				zone_start = max_t(unsigned int, orig_size,
-						s->offset + sizeof(void *));
 
 			/*
 			 * Redzone the extra allocated space by kmalloc
 			 * than requested.
 			 */
-			if (zone_start < s->object_size)
-				memset(p + zone_start, val,
-					s->object_size - zone_start);
+			if (orig_size < s->object_size)
+				memset(p + orig_size, val,
+				       s->object_size - orig_size);
 		}
 	}
 
@@ -1153,9 +1146,6 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
 			orig_size = get_orig_size(s, object);
 
-			if (!freeptr_outside_object(s))
-				orig_size = max_t(unsigned int, orig_size,
-						s->offset + sizeof(void *));
 			if (s->object_size > orig_size  &&
 				!check_bytes_and_report(s, slab, object,
 					"kmalloc Redzone", p + orig_size,
@@ -4234,7 +4224,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+	if (slub_debug_orig_size(s) ||
+	    (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
 	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
 	    s->ctor) {
 		/*
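
For context, a rough layout sketch of what the suggestion above implies
(field placement is inferred from the hunks, not stated in the thread):

	/*
	 * Free pointer kept inside the object (s->offset < s->object_size):
	 *
	 *   |<--- requested (orig_size) --->|<----- extra space ----->|
	 *   [ payload ....... [ free ptr @ s->offset ? ] ............ ][ right redzone ]
	 *
	 *   The free pointer slot may land inside the extra space, so the
	 *   original hunks keep the redzone away from it by clamping its
	 *   start to max_t(unsigned int, orig_size, s->offset + sizeof(void *)).
	 *
	 * Free pointer moved outside the object, which the calculate_sizes()
	 * hunk above enforces whenever slub_debug_orig_size(s):
	 *
	 *   [ payload ....... | kmalloc redzone ....................... ][ right redzone ]
	 *   [ free ptr ][ tracks ][ orig_size ] ...
	 *
	 *   The kmalloc redzone can simply cover orig_size..object_size,
	 *   so both special cases in init_object()/check_object() go away.
	 */

The trade-off is one extra pointer of per-object overhead for these caches,
the same cost already paid when poisoning, SLAB_TYPESAFE_BY_RCU, or a
constructor forces the pointer outside the object in the condition above.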
Feng Tang Sept. 9, 2022, 7:33 a.m. UTC | #2
On Fri, Sep 09, 2022 at 02:26:34PM +0800, Hyeonggon Yoo wrote:
> On Wed, Sep 07, 2022 at 03:10:23PM +0800, Feng Tang wrote:
> > kmalloc will round up the request size to a fixed size (mostly power
> > of 2), so there could be a extra space than what is requested, whose
> > size is the actual buffer size minus original request size.
> > 
> > To better detect out of bound access or abuse of this space, add
> > redzone sanity check for it.
> > 
> > And in current kernel, some kmalloc user already knows the existence
> > of the space and utilizes it after calling 'ksize()' to know the real
> > size of the allocated buffer. So we skip the sanity check for objects
> > which have been called with ksize(), as treating them as legitimate
> > users.
> > 
> > Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> > Signed-off-by: Feng Tang <feng.tang@intel.com>
> > ---
[...]

> > -	if (s->flags & SLAB_RED_ZONE)
> > +	if (s->flags & SLAB_RED_ZONE) {
> >  		memset(p - s->red_left_pad, val, s->red_left_pad);
> >  
> > +		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> > +			unsigned int zone_start;
> > +
> > +			orig_size = get_orig_size(s, object);
> > +			zone_start = orig_size;
> > +
> > +			if (!freeptr_outside_object(s))
> > +				zone_start = max_t(unsigned int, orig_size,
> > +						s->offset + sizeof(void *));
> > +
> > +			/*
> > +			 * Redzone the extra allocated space by kmalloc
> > +			 * than requested.
> > +			 */
> > +			if (zone_start < s->object_size)
> > +				memset(p + zone_start, val,
> > +					s->object_size - zone_start);
> > +		}
> > +	}
> > +
> >  	if (s->flags & __OBJECT_POISON) {
> > -		memset(p, POISON_FREE, s->object_size - 1);
> > -		p[s->object_size - 1] = POISON_END;
> > +		memset(p, POISON_FREE, orig_size - 1);
> > +		p[orig_size - 1] = POISON_END;
> >  	}
> >  
> >  	if (s->flags & SLAB_RED_ZONE)
> > @@ -1103,6 +1139,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
> >  {
> >  	u8 *p = object;
> >  	u8 *endobject = object + s->object_size;
> > +	unsigned int orig_size;
> >  
> >  	if (s->flags & SLAB_RED_ZONE) {
> >  		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
> > @@ -1112,6 +1149,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
> >  		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
> >  			endobject, val, s->inuse - s->object_size))
> >  			return 0;
> > +
> > +		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> > +			orig_size = get_orig_size(s, object);
> > +
> > +			if (!freeptr_outside_object(s))
> > +				orig_size = max_t(unsigned int, orig_size,
> > +						s->offset + sizeof(void *));
> > +			if (s->object_size > orig_size  &&
> > +				!check_bytes_and_report(s, slab, object,
> > +					"kmalloc Redzone", p + orig_size,
> > +					val, s->object_size - orig_size)) {
> > +				return 0;
> > +			}
> > +		}
> >  	} else {
> >  		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
> >  			check_bytes_and_report(s, slab, p, "Alignment padding",
> > -- 
> > 2.34.1
> > 
> 
> Looks good, but what about putting
> free pointer outside object when slub_debug_orig_size(s)?
 
Sounds good to me. This way all kmalloc slabs are covered by the
redzone check. I just gave the code a shot, and it works with my
test case! Thanks!

- Feng


> diff --git a/mm/slub.c b/mm/slub.c
> index 9d1a985c9ede..7e57d9f718d1 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -970,22 +970,15 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
>  		memset(p - s->red_left_pad, val, s->red_left_pad);
>  
>  		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> -			unsigned int zone_start;
> -
>  			orig_size = get_orig_size(s, object);
> -			zone_start = orig_size;
> -
> -			if (!freeptr_outside_object(s))
> -				zone_start = max_t(unsigned int, orig_size,
> -						s->offset + sizeof(void *));
>  
>  			/*
>  			 * Redzone the extra allocated space by kmalloc
>  			 * than requested.
>  			 */
> -			if (zone_start < s->object_size)
> -				memset(p + zone_start, val,
> -					s->object_size - zone_start);
> +			if (orig_size < s->object_size)
> +				memset(p + orig_size, val,
> +				       s->object_size - orig_size);
>  		}
>  	}
>  
> @@ -1153,9 +1146,6 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
>  		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
>  			orig_size = get_orig_size(s, object);
>  
> -			if (!freeptr_outside_object(s))
> -				orig_size = max_t(unsigned int, orig_size,
> -						s->offset + sizeof(void *));
>  			if (s->object_size > orig_size  &&
>  				!check_bytes_and_report(s, slab, object,
>  					"kmalloc Redzone", p + orig_size,
> @@ -4234,7 +4224,8 @@ static int calculate_sizes(struct kmem_cache *s)
>  	 */
>  	s->inuse = size;
>  
> -	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
> +	if (slub_debug_orig_size(s) ||
> +	    (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
>  	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
>  	    s->ctor) {
>  		/*
> 
> -- 
> Thanks,
> Hyeonggon
>
Andrey Konovalov Sept. 10, 2022, 11:12 p.m. UTC | #3
On Wed, Sep 7, 2022 at 9:11 AM Feng Tang <feng.tang@intel.com> wrote:
>
> kmalloc will round up the request size to a fixed size (mostly power
> of 2), so there could be a extra space than what is requested, whose
> size is the actual buffer size minus original request size.
>
> To better detect out of bound access or abuse of this space, add
> redzone sanity check for it.
>
> And in current kernel, some kmalloc user already knows the existence
> of the space and utilizes it after calling 'ksize()' to know the real
> size of the allocated buffer. So we skip the sanity check for objects
> which have been called with ksize(), as treating them as legitimate
> users.
>
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Feng Tang <feng.tang@intel.com>
> ---
>  mm/slab.h        |  4 ++++
>  mm/slab_common.c |  4 ++++
>  mm/slub.c        | 57 +++++++++++++++++++++++++++++++++++++++++++++---
>  3 files changed, 62 insertions(+), 3 deletions(-)
>
> diff --git a/mm/slab.h b/mm/slab.h
> index 20f9e2a9814f..0bc91b30b031 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -885,4 +885,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
>  }
>  #endif
>
> +#ifdef CONFIG_SLUB_DEBUG
> +void skip_orig_size_check(struct kmem_cache *s, const void *object);
> +#endif
> +
>  #endif /* MM_SLAB_H */
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8e13e3aac53f..5106667d6adb 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1001,6 +1001,10 @@ size_t __ksize(const void *object)
>                 return folio_size(folio);
>         }
>
> +#ifdef CONFIG_SLUB_DEBUG
> +       skip_orig_size_check(folio_slab(folio)->slab_cache, object);
> +#endif
> +
>         return slab_ksize(folio_slab(folio)->slab_cache);
>  }
>
> diff --git a/mm/slub.c b/mm/slub.c
> index f523601d3fcf..2f0302136604 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -812,12 +812,27 @@ static inline void set_orig_size(struct kmem_cache *s,
>         if (!slub_debug_orig_size(s))
>                 return;
>
> +#ifdef CONFIG_KASAN_GENERIC
> +       /*
> +        * KASAN could save its free meta data in the start part of object
> +        * area, so skip the redzone check if kasan's meta data size is
> +        * bigger enough to possibly overlap with kmalloc redzone
> +        */
> +       if (s->kasan_info.free_meta_size_in_object * 2 >= s->object_size)

Why is free_meta_size_in_object multiplied by 2? Looks cryptic,
probably needs a comment.

Thanks!

> +               orig_size = s->object_size;
> +#endif
> +
>         p += get_info_end(s);
>         p += sizeof(struct track) * 2;
>
>         *(unsigned int *)p = orig_size;
>  }
>
> +void skip_orig_size_check(struct kmem_cache *s, const void *object)
> +{
> +       set_orig_size(s, (void *)object, s->object_size);
> +}
> +
>  static unsigned int get_orig_size(struct kmem_cache *s, void *object)
>  {
>         void *p = kasan_reset_tag(object);
> @@ -949,13 +964,34 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
>  static void init_object(struct kmem_cache *s, void *object, u8 val)
>  {
>         u8 *p = kasan_reset_tag(object);
> +       unsigned int orig_size = s->object_size;
>
> -       if (s->flags & SLAB_RED_ZONE)
> +       if (s->flags & SLAB_RED_ZONE) {
>                 memset(p - s->red_left_pad, val, s->red_left_pad);
>
> +               if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> +                       unsigned int zone_start;
> +
> +                       orig_size = get_orig_size(s, object);
> +                       zone_start = orig_size;
> +
> +                       if (!freeptr_outside_object(s))
> +                               zone_start = max_t(unsigned int, orig_size,
> +                                               s->offset + sizeof(void *));
> +
> +                       /*
> +                        * Redzone the extra allocated space by kmalloc
> +                        * than requested.
> +                        */
> +                       if (zone_start < s->object_size)
> +                               memset(p + zone_start, val,
> +                                       s->object_size - zone_start);
> +               }
> +       }
> +
>         if (s->flags & __OBJECT_POISON) {
> -               memset(p, POISON_FREE, s->object_size - 1);
> -               p[s->object_size - 1] = POISON_END;
> +               memset(p, POISON_FREE, orig_size - 1);
> +               p[orig_size - 1] = POISON_END;
>         }
>
>         if (s->flags & SLAB_RED_ZONE)
> @@ -1103,6 +1139,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
>  {
>         u8 *p = object;
>         u8 *endobject = object + s->object_size;
> +       unsigned int orig_size;
>
>         if (s->flags & SLAB_RED_ZONE) {
>                 if (!check_bytes_and_report(s, slab, object, "Left Redzone",
> @@ -1112,6 +1149,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
>                 if (!check_bytes_and_report(s, slab, object, "Right Redzone",
>                         endobject, val, s->inuse - s->object_size))
>                         return 0;
> +
> +               if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
> +                       orig_size = get_orig_size(s, object);
> +
> +                       if (!freeptr_outside_object(s))
> +                               orig_size = max_t(unsigned int, orig_size,
> +                                               s->offset + sizeof(void *));
> +                       if (s->object_size > orig_size  &&
> +                               !check_bytes_and_report(s, slab, object,
> +                                       "kmalloc Redzone", p + orig_size,
> +                                       val, s->object_size - orig_size)) {
> +                               return 0;
> +                       }
> +               }
>         } else {
>                 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
>                         check_bytes_and_report(s, slab, p, "Alignment padding",
> --
> 2.34.1
>
Feng Tang Sept. 11, 2022, 4:10 a.m. UTC | #4
On Sun, Sep 11, 2022 at 07:12:05AM +0800, Andrey Konovalov wrote:
> On Wed, Sep 7, 2022 at 9:11 AM Feng Tang <feng.tang@intel.com> wrote:
> >
> > kmalloc will round up the request size to a fixed size (mostly power
> > of 2), so there could be a extra space than what is requested, whose
> > size is the actual buffer size minus original request size.
> >
> > To better detect out of bound access or abuse of this space, add
> > redzone sanity check for it.
> >
> > And in current kernel, some kmalloc user already knows the existence
> > of the space and utilizes it after calling 'ksize()' to know the real
> > size of the allocated buffer. So we skip the sanity check for objects
> > which have been called with ksize(), as treating them as legitimate
> > users.
> >
> > Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> > Signed-off-by: Feng Tang <feng.tang@intel.com>
> > ---
> >  mm/slab.h        |  4 ++++
> >  mm/slab_common.c |  4 ++++
> >  mm/slub.c        | 57 +++++++++++++++++++++++++++++++++++++++++++++---
> >  3 files changed, 62 insertions(+), 3 deletions(-)
> >
> > diff --git a/mm/slab.h b/mm/slab.h
> > index 20f9e2a9814f..0bc91b30b031 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -885,4 +885,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
> >  }
> >  #endif
> >
> > +#ifdef CONFIG_SLUB_DEBUG
> > +void skip_orig_size_check(struct kmem_cache *s, const void *object);
> > +#endif
> > +
> >  #endif /* MM_SLAB_H */
> > diff --git a/mm/slab_common.c b/mm/slab_common.c
> > index 8e13e3aac53f..5106667d6adb 100644
> > --- a/mm/slab_common.c
> > +++ b/mm/slab_common.c
> > @@ -1001,6 +1001,10 @@ size_t __ksize(const void *object)
> >                 return folio_size(folio);
> >         }
> >
> > +#ifdef CONFIG_SLUB_DEBUG
> > +       skip_orig_size_check(folio_slab(folio)->slab_cache, object);
> > +#endif
> > +
> >         return slab_ksize(folio_slab(folio)->slab_cache);
> >  }
> >
> > diff --git a/mm/slub.c b/mm/slub.c
> > index f523601d3fcf..2f0302136604 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -812,12 +812,27 @@ static inline void set_orig_size(struct kmem_cache *s,
> >         if (!slub_debug_orig_size(s))
> >                 return;
> >
> > +#ifdef CONFIG_KASAN_GENERIC
> > +       /*
> > +        * KASAN could save its free meta data in the start part of object
> > +        * area, so skip the redzone check if kasan's meta data size is
> > +        * bigger enough to possibly overlap with kmalloc redzone
> > +        */
> > +       if (s->kasan_info.free_meta_size_in_object * 2 >= s->object_size)
> 
> Why is free_meta_size_in_object multiplied by 2? Looks cryptic,
> probably needs a comment.
 
OK, will change; I didn't make it clear.

The basic idea is that KASAN may save its free meta data in the
object's data area starting at offset 0, where it could overlap the
kmalloc in-object redzone, which can only sit in the second half of
the data area. As long as KASAN's free meta stays within the first
half, we're fine.

Maybe I can change the check to

  if (s->kasan_info.free_meta_size_in_object > orig_size)
	...
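
For reference, a stand-alone restatement of the overlap condition being
discussed (the helper, its name, and the numbers below are illustrative
assumptions, not code from the series):

	/* KASAN's free meta occupies object bytes [0, free_meta_size);
	 * the kmalloc redzone occupies [orig_size, object_size). */
	static bool kasan_meta_overlaps_kmalloc_redzone(unsigned int free_meta_size,
							unsigned int orig_size)
	{
		return free_meta_size > orig_size;
	}

	/*
	 * Example with made-up numbers: object_size = 96, orig_size = 65,
	 * free_meta_size_in_object = 72 -> overlap, so set_orig_size() would
	 * force orig_size = object_size and the extra-space check is skipped
	 * for this cache.  The "* 2 >= object_size" test in the posted patch
	 * expresses the same idea more conservatively: skip whenever the meta
	 * could reach into the second half of the object, where the kmalloc
	 * redzone normally lies.
	 */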

Thanks,
Feng

> Thanks!
>

Patch

diff --git a/mm/slab.h b/mm/slab.h
index 20f9e2a9814f..0bc91b30b031 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -885,4 +885,8 @@  void __check_heap_object(const void *ptr, unsigned long n,
 }
 #endif
 
+#ifdef CONFIG_SLUB_DEBUG
+void skip_orig_size_check(struct kmem_cache *s, const void *object);
+#endif
+
 #endif /* MM_SLAB_H */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8e13e3aac53f..5106667d6adb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1001,6 +1001,10 @@  size_t __ksize(const void *object)
 		return folio_size(folio);
 	}
 
+#ifdef CONFIG_SLUB_DEBUG
+	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+#endif
+
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index f523601d3fcf..2f0302136604 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -812,12 +812,27 @@  static inline void set_orig_size(struct kmem_cache *s,
 	if (!slub_debug_orig_size(s))
 		return;
 
+#ifdef CONFIG_KASAN_GENERIC
+	/*
+	 * KASAN could save its free meta data in the start part of object
+	 * area, so skip the redzone check if kasan's meta data size is
+	 * bigger enough to possibly overlap with kmalloc redzone
+	 */
+	if (s->kasan_info.free_meta_size_in_object * 2 >= s->object_size)
+		orig_size = s->object_size;
+#endif
+
 	p += get_info_end(s);
 	p += sizeof(struct track) * 2;
 
 	*(unsigned int *)p = orig_size;
 }
 
+void skip_orig_size_check(struct kmem_cache *s, const void *object)
+{
+	set_orig_size(s, (void *)object, s->object_size);
+}
+
 static unsigned int get_orig_size(struct kmem_cache *s, void *object)
 {
 	void *p = kasan_reset_tag(object);
@@ -949,13 +964,34 @@  static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = kasan_reset_tag(object);
+	unsigned int orig_size = s->object_size;
 
-	if (s->flags & SLAB_RED_ZONE)
+	if (s->flags & SLAB_RED_ZONE) {
 		memset(p - s->red_left_pad, val, s->red_left_pad);
 
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			unsigned int zone_start;
+
+			orig_size = get_orig_size(s, object);
+			zone_start = orig_size;
+
+			if (!freeptr_outside_object(s))
+				zone_start = max_t(unsigned int, orig_size,
+						s->offset + sizeof(void *));
+
+			/*
+			 * Redzone the extra allocated space by kmalloc
+			 * than requested.
+			 */
+			if (zone_start < s->object_size)
+				memset(p + zone_start, val,
+					s->object_size - zone_start);
+		}
+	}
+
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->object_size - 1);
-		p[s->object_size - 1] = POISON_END;
+		memset(p, POISON_FREE, orig_size - 1);
+		p[orig_size - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -1103,6 +1139,7 @@  static int check_object(struct kmem_cache *s, struct slab *slab,
 {
 	u8 *p = object;
 	u8 *endobject = object + s->object_size;
+	unsigned int orig_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1112,6 +1149,20 @@  static int check_object(struct kmem_cache *s, struct slab *slab,
 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
+
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			orig_size = get_orig_size(s, object);
+
+			if (!freeptr_outside_object(s))
+				orig_size = max_t(unsigned int, orig_size,
+						s->offset + sizeof(void *));
+			if (s->object_size > orig_size  &&
+				!check_bytes_and_report(s, slab, object,
+					"kmalloc Redzone", p + orig_size,
+					val, s->object_size - orig_size)) {
+				return 0;
+			}
+		}
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, slab, p, "Alignment padding",