[v4,3/3] mm/slub: Actually fix freelist pointer vs redzoning

Message ID 20210608183955.280836-4-keescook@chromium.org (mailing list archive)
State New, archived
Headers show
Series Actually fix freelist pointer vs redzoning

Commit Message

Kees Cook June 8, 2021, 6:39 p.m. UTC
It turns out that SLUB redzoning ("slub_debug=Z") checks from
s->object_size rather than from s->inuse (which is normally bumped
to make room for the freelist pointer), so a cache created with an
object size less than 24 would have the freelist pointer written beyond
s->object_size, causing the redzone to be corrupted by the freelist
pointer. This was very visible with "slub_debug=ZF":

BUG test (Tainted: G    B            ): Right Redzone overwritten
-----------------------------------------------------------------------------

INFO: 0xffff957ead1c05de-0xffff957ead1c05df @offset=1502. First byte 0x1a instead of 0xbb
INFO: Slab 0xffffef3950b47000 objects=170 used=170 fp=0x0000000000000000 flags=0x8000000000000200
INFO: Object 0xffff957ead1c05d8 @offset=1496 fp=0xffff957ead1c0620

Redzone  (____ptrval____): bb bb bb bb bb bb bb bb               ........
Object   (____ptrval____): 00 00 00 00 00 f6 f4 a5               ........
Redzone  (____ptrval____): 40 1d e8 1a aa                        @....
Padding  (____ptrval____): 00 00 00 00 00 00 00 00               ........

Adjust the offset to stay within s->object_size.

(Note that no caches in this size range are known to exist in the
kernel currently.)

Reported-by: Marco Elver <elver@google.com>
Reported-by: "Lin, Zhenpeng" <zplin@psu.edu>
Link: https://lore.kernel.org/linux-mm/20200807160627.GA1420741@elver.google.com/
Fixes: 89b83f282d8b ("slub: avoid redzone when choosing freepointer location")
Cc: stable@vger.kernel.org
Tested-by: Marco Elver <elver@google.com>
Link: https://lore.kernel.org/lkml/CANpmjNOwZ5VpKQn+SYWovTkFB4VsT-RPwyENBmaK0dLcpqStkA@mail.gmail.com
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/lkml/0f7dd7b2-7496-5e2d-9488-2ec9f8e90441@suse.cz/
---
 mm/slub.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)
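
To make the failure mode concrete, here is a minimal stand-alone sketch
(user-space C, not kernel code) of the old offset arithmetic. The 20-byte
object size is a hypothetical example from the affected sub-24-byte range,
and the ALIGN() macro below simply mirrors the kernel's round-up semantics
for power-of-two alignments:

#include <stdio.h>

/* Mirrors the kernel's ALIGN() round-up behaviour for power-of-two alignments. */
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long object_size = 20;	/* hypothetical cache in the "< 24" range */
	unsigned long size = ALIGN(object_size, sizeof(void *));	/* 24 */
	unsigned long offset = ALIGN(size / 2, sizeof(void *));	/* ALIGN(12, 8) == 16 */

	/*
	 * The freelist pointer occupies bytes 16..23, but the right redzone
	 * check starts at object_size (byte 20), so the last four bytes of
	 * the pointer land in the redzone -- the kind of overwrite shown in
	 * the report above.
	 */
	printf("freelist pointer at bytes %lu..%lu, redzone starts at byte %lu\n",
	       offset, (unsigned long)(offset + sizeof(void *) - 1), object_size);
	return 0;
}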

Comments

Andrew Morton June 8, 2021, 8:56 p.m. UTC | #1
On Tue,  8 Jun 2021 11:39:55 -0700 Kees Cook <keescook@chromium.org> wrote:

> It turns out that SLUB redzoning ("slub_debug=Z") checks from
> s->object_size rather than from s->inuse (which is normally bumped
> to make room for the freelist pointer), so a cache created with an
> object size less than 24 would have the freelist pointer written beyond
> s->object_size, causing the redzone to be corrupted by the freelist
> pointer. This was very visible with "slub_debug=ZF":
> 
> BUG test (Tainted: G    B            ): Right Redzone overwritten
> -----------------------------------------------------------------------------
> 
> INFO: 0xffff957ead1c05de-0xffff957ead1c05df @offset=1502. First byte 0x1a instead of 0xbb
> INFO: Slab 0xffffef3950b47000 objects=170 used=170 fp=0x0000000000000000 flags=0x8000000000000200
> INFO: Object 0xffff957ead1c05d8 @offset=1496 fp=0xffff957ead1c0620
> 
> Redzone  (____ptrval____): bb bb bb bb bb bb bb bb               ........
> Object   (____ptrval____): 00 00 00 00 00 f6 f4 a5               ........
> Redzone  (____ptrval____): 40 1d e8 1a aa                        @....
> Padding  (____ptrval____): 00 00 00 00 00 00 00 00               ........
> 
> Adjust the offset to stay within s->object_size.
> 
> (Note that no caches in this size range are known to exist in the
> kernel currently.)

We already have
https://lkml.kernel.org/r/6746FEEA-FD69-4792-8DDA-C78F5FE7DA02@psu.edu.
Is this patch better?

> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3689,7 +3689,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
>  {
>  	slab_flags_t flags = s->flags;
>  	unsigned int size = s->object_size;
> -	unsigned int freepointer_area;
>  	unsigned int order;
>  
>  	/*
> @@ -3698,13 +3697,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
>  	 * the possible location of the free pointer.
>  	 */
>  	size = ALIGN(size, sizeof(void *));
> -	/*
> -	 * This is the area of the object where a freepointer can be
> -	 * safely written. If redzoning adds more to the inuse size, we
> -	 * can't use that portion for writing the freepointer, so
> -	 * s->offset must be limited within this for the general case.
> -	 */
> -	freepointer_area = size;
>  
>  #ifdef CONFIG_SLUB_DEBUG
>  	/*
> @@ -3730,7 +3722,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
>  
>  	/*
>  	 * With that we have determined the number of bytes in actual use
> -	 * by the object. This is the potential offset to the free pointer.
> +	 * by the object and redzoning.
>  	 */
>  	s->inuse = size;
>  
> @@ -3753,13 +3745,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
>  		 */
>  		s->offset = size;
>  		size += sizeof(void *);
> -	} else if (freepointer_area > sizeof(void *)) {
> +	} else {
>  		/*
>  		 * Store freelist pointer near middle of object to keep
>  		 * it away from the edges of the object to avoid small
>  		 * sized over/underflows from neighboring allocations.
>  		 */
> -		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
> +		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
>  	}
>  
>  #ifdef CONFIG_SLUB_DEBUG
> -- 
> 2.25.1
Kees Cook June 8, 2021, 11:11 p.m. UTC | #2
On Tue, Jun 08, 2021 at 01:56:33PM -0700, Andrew Morton wrote:
> On Tue,  8 Jun 2021 11:39:55 -0700 Kees Cook <keescook@chromium.org> wrote:
> 
> > It turns out that SLUB redzoning ("slub_debug=Z") checks from
> > s->object_size rather than from s->inuse (which is normally bumped
> > to make room for the freelist pointer), so a cache created with an
> > object size less than 24 would have the freelist pointer written beyond
> > s->object_size, causing the redzone to be corrupted by the freelist
> > pointer. This was very visible with "slub_debug=ZF":
> > 
> > BUG test (Tainted: G    B            ): Right Redzone overwritten
> > -----------------------------------------------------------------------------
> > 
> > INFO: 0xffff957ead1c05de-0xffff957ead1c05df @offset=1502. First byte 0x1a instead of 0xbb
> > INFO: Slab 0xffffef3950b47000 objects=170 used=170 fp=0x0000000000000000 flags=0x8000000000000200
> > INFO: Object 0xffff957ead1c05d8 @offset=1496 fp=0xffff957ead1c0620
> > 
> > Redzone  (____ptrval____): bb bb bb bb bb bb bb bb               ........
> > Object   (____ptrval____): 00 00 00 00 00 f6 f4 a5               ........
> > Redzone  (____ptrval____): 40 1d e8 1a aa                        @....
> > Padding  (____ptrval____): 00 00 00 00 00 00 00 00               ........
> > 
> > Adjust the offset to stay within s->object_size.
> > 
> > (Note that no caches in this size range are known to exist in the
> > kernel currently.)
> 
> We already have
> https://lkml.kernel.org/r/6746FEEA-FD69-4792-8DDA-C78F5FE7DA02@psu.edu.
> Is this patch better?

Yes, I believe so, since it reduces code and corrects the size checking
more directly (and more clearly demonstrates the redzone calculation
problem in the commit log).

-Kees

> 
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -3689,7 +3689,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
> >  {
> >  	slab_flags_t flags = s->flags;
> >  	unsigned int size = s->object_size;
> > -	unsigned int freepointer_area;
> >  	unsigned int order;
> >  
> >  	/*
> > @@ -3698,13 +3697,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
> >  	 * the possible location of the free pointer.
> >  	 */
> >  	size = ALIGN(size, sizeof(void *));
> > -	/*
> > -	 * This is the area of the object where a freepointer can be
> > -	 * safely written. If redzoning adds more to the inuse size, we
> > -	 * can't use that portion for writing the freepointer, so
> > -	 * s->offset must be limited within this for the general case.
> > -	 */
> > -	freepointer_area = size;
> >  
> >  #ifdef CONFIG_SLUB_DEBUG
> >  	/*
> > @@ -3730,7 +3722,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
> >  
> >  	/*
> >  	 * With that we have determined the number of bytes in actual use
> > -	 * by the object. This is the potential offset to the free pointer.
> > +	 * by the object and redzoning.
> >  	 */
> >  	s->inuse = size;
> >  
> > @@ -3753,13 +3745,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
> >  		 */
> >  		s->offset = size;
> >  		size += sizeof(void *);
> > -	} else if (freepointer_area > sizeof(void *)) {
> > +	} else {
> >  		/*
> >  		 * Store freelist pointer near middle of object to keep
> >  		 * it away from the edges of the object to avoid small
> >  		 * sized over/underflows from neighboring allocations.
> >  		 */
> > -		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
> > +		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
> >  	}
> >  
> >  #ifdef CONFIG_SLUB_DEBUG
> > -- 
> > 2.25.1

Patch

diff --git a/mm/slub.c b/mm/slub.c
index f58cfd456548..fe30df460fad 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3689,7 +3689,6 @@  static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
-	unsigned int freepointer_area;
 	unsigned int order;
 
 	/*
@@ -3698,13 +3697,6 @@  static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 * the possible location of the free pointer.
 	 */
 	size = ALIGN(size, sizeof(void *));
-	/*
-	 * This is the area of the object where a freepointer can be
-	 * safely written. If redzoning adds more to the inuse size, we
-	 * can't use that portion for writing the freepointer, so
-	 * s->offset must be limited within this for the general case.
-	 */
-	freepointer_area = size;
 
 #ifdef CONFIG_SLUB_DEBUG
 	/*
@@ -3730,7 +3722,7 @@  static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 	/*
 	 * With that we have determined the number of bytes in actual use
-	 * by the object. This is the potential offset to the free pointer.
+	 * by the object and redzoning.
 	 */
 	s->inuse = size;
 
@@ -3753,13 +3745,13 @@  static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
-	} else if (freepointer_area > sizeof(void *)) {
+	} else {
 		/*
 		 * Store freelist pointer near middle of object to keep
 		 * it away from the edges of the object to avoid small
 		 * sized over/underflows from neighboring allocations.
 		 */
-		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
+		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
 	}
 
 #ifdef CONFIG_SLUB_DEBUG
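
With the final hunk applied, the same hypothetical 20-byte object gets
s->offset = ALIGN_DOWN(20 / 2, 8) = 8, so the freelist pointer occupies
bytes 8..15, entirely within s->object_size, and the right redzone
starting at byte 20 is left intact.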