Message ID | 431c6cfa0ac8fb2b33d7ab561a64aa84c844d1a0.1612208222.git.andreyknvl@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | kasan: optimizations and fixes for HW_TAGS |
```
On Mon, Feb 01, 2021 at 08:43PM +0100, Andrey Konovalov wrote:
> Currently, krealloc() always calls ksize(), which unpoisons the whole
> object including the redzone. This is inefficient, as kasan_krealloc()
> repoisons the redzone for objects that fit into the same buffer.
>
> This patch changes krealloc() instrumentation to use uninstrumented
> __ksize() that doesn't unpoison the memory. Instead, kasan_krealloc()
> is changed to unpoison the memory excluding the redzone.
>
> For objects that don't fit into the old allocation, this patch disables
> KASAN accessibility checks when copying memory into a new object instead
> of unpoisoning it.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> ---
>  mm/kasan/common.c | 12 ++++++++++--
>  mm/slab_common.c  | 20 ++++++++++++++------
>  2 files changed, 24 insertions(+), 8 deletions(-)
>
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 9c64a00bbf9c..a51d6ea580b0 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -476,7 +476,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
>
>  	/*
>  	 * The object has already been unpoisoned by kasan_slab_alloc() for
> -	 * kmalloc() or by ksize() for krealloc().
> +	 * kmalloc() or by kasan_krealloc() for krealloc().
>  	 */
>
>  	/*
> @@ -526,7 +526,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
>
>  	/*
>  	 * The object has already been unpoisoned by kasan_alloc_pages() for
> -	 * alloc_pages() or by ksize() for krealloc().
> +	 * alloc_pages() or by kasan_krealloc() for krealloc().
>  	 */
>
>  	/*
> @@ -554,8 +554,16 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
>  	if (unlikely(object == ZERO_SIZE_PTR))
>  		return (void *)object;
>
> +	/*
> +	 * Unpoison the object's data.
> +	 * Part of it might already have been unpoisoned, but it's unknown
> +	 * how big that part is.
> +	 */
> +	kasan_unpoison(object, size);
> +
>  	page = virt_to_head_page(object);
>
> +	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
>  	if (unlikely(!PageSlab(page)))
>  		return __kasan_kmalloc_large(object, size, flags);
>  	else
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index dad70239b54c..821f657d38b5 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1140,19 +1140,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
>  	void *ret;
>  	size_t ks;
>
> -	if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
> -		return NULL;
> -
> -	ks = ksize(p);
> +	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
> +	if (likely(!ZERO_OR_NULL_PTR(p))) {
> +		if (!kasan_check_byte(p))
> +			return NULL;
> +		ks = __ksize(p);
> +	} else
> +		ks = 0;

This unfortunately broke KFENCE:
https://syzkaller.appspot.com/bug?extid=e444e1006d07feef0ef3
plus various other false positives.

We need to use ksize() here, as __ksize() is unaware of KFENCE. Or
somehow add the same check here that ksize() uses to get the real
object size.

> +	/* If the object still fits, repoison it precisely. */
>  	if (ks >= new_size) {
>  		p = kasan_krealloc((void *)p, new_size, flags);
>  		return (void *)p;
>  	}
>
>  	ret = kmalloc_track_caller(new_size, flags);
> -	if (ret && p)
> -		memcpy(ret, p, ks);
> +	if (ret && p) {
> +		/* Disable KASAN checks as the object's redzone is accessed. */
> +		kasan_disable_current();
> +		memcpy(ret, kasan_reset_tag(p), ks);
> +		kasan_enable_current();
> +	}
>
>  	return ret;
> }
> --
> 2.30.0.365.g02bc693789-goog
```
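For reference, the second option suggested above could look roughly like the sketch below. It relies on kfence_ksize() from <linux/kfence.h>, which returns the allocation size for a KFENCE-managed object and 0 for any other pointer; the helper name krealloc_object_size() is hypothetical, chosen only for this illustration, and this is not the committed fix.

```c
/*
 * Sketch: a KFENCE-aware replacement for the bare __ksize() call in
 * __do_krealloc(). __ksize() knows nothing about KFENCE objects, so
 * consult kfence_ksize() first; it returns 0 for non-KFENCE pointers,
 * in which case we fall back to the regular slab size lookup.
 */
#include <linux/kfence.h>
#include <linux/slab.h>

static inline size_t krealloc_object_size(const void *p)
{
	size_t ks = kfence_ksize(p);	/* 0 unless p is KFENCE-managed */

	return ks ? ks : __ksize(p);
}
```

With such a helper, __do_krealloc() would call krealloc_object_size(p) instead of __ksize(p), keeping the uninstrumented fast path for regular slab objects while still reporting correct sizes for KFENCE-managed ones.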
```diff
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 9c64a00bbf9c..a51d6ea580b0 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -476,7 +476,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 
 	/*
 	 * The object has already been unpoisoned by kasan_slab_alloc() for
-	 * kmalloc() or by ksize() for krealloc().
+	 * kmalloc() or by kasan_krealloc() for krealloc().
 	 */
 
 	/*
@@ -526,7 +526,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 	/*
 	 * The object has already been unpoisoned by kasan_alloc_pages() for
-	 * alloc_pages() or by ksize() for krealloc().
+	 * alloc_pages() or by kasan_krealloc() for krealloc().
 	 */
 
 	/*
@@ -554,8 +554,16 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
+	/*
+	 * Unpoison the object's data.
+	 * Part of it might already have been unpoisoned, but it's unknown
+	 * how big that part is.
+	 */
+	kasan_unpoison(object, size);
+
 	page = virt_to_head_page(object);
 
+	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!PageSlab(page)))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
diff --git a/mm/slab_common.c b/mm/slab_common.c
index dad70239b54c..821f657d38b5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1140,19 +1140,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	void *ret;
 	size_t ks;
 
-	if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
-		return NULL;
-
-	ks = ksize(p);
+	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
+	if (likely(!ZERO_OR_NULL_PTR(p))) {
+		if (!kasan_check_byte(p))
+			return NULL;
+		ks = __ksize(p);
+	} else
+		ks = 0;
 
+	/* If the object still fits, repoison it precisely. */
 	if (ks >= new_size) {
 		p = kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
 
 	ret = kmalloc_track_caller(new_size, flags);
-	if (ret && p)
-		memcpy(ret, p, ks);
+	if (ret && p) {
+		/* Disable KASAN checks as the object's redzone is accessed. */
+		kasan_disable_current();
+		memcpy(ret, kasan_reset_tag(p), ks);
+		kasan_enable_current();
+	}
 
 	return ret;
 }
```
Currently, krealloc() always calls ksize(), which unpoisons the whole
object including the redzone. This is inefficient, as kasan_krealloc()
repoisons the redzone for objects that fit into the same buffer.

This patch changes krealloc() instrumentation to use uninstrumented
__ksize() that doesn't unpoison the memory. Instead, kasan_krealloc()
is changed to unpoison the memory excluding the redzone.

For objects that don't fit into the old allocation, this patch disables
KASAN accessibility checks when copying memory into a new object instead
of unpoisoning it.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/kasan/common.c | 12 ++++++++++--
 mm/slab_common.c  | 20 ++++++++++++++------
 2 files changed, 24 insertions(+), 8 deletions(-)
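For readers less familiar with the KASAN internals, the in-place path described above can be sketched as follows. This is a conceptual illustration only: kasan_unpoison() and kasan_poison() are internal mm/kasan helpers whose exact signatures are simplified here, and object_size is a hypothetical stand-in for the size the slab allocator actually reserved for the object.

```c
/*
 * Conceptual sketch of the in-place krealloc() path after this patch.
 * Not verbatim kernel code; helper signatures are simplified.
 */
static void *krealloc_in_place_sketch(void *p, size_t new_size,
				      size_t object_size)
{
	/*
	 * kasan_krealloc(): unpoison exactly the bytes the caller asked
	 * for. How much an earlier kmalloc()/ksize() left unpoisoned is
	 * unknown, so start from the requested size.
	 */
	kasan_unpoison(p, new_size);

	/*
	 * The kmalloc() instrumentation then repoisons everything past
	 * new_size (rounded up to the KASAN granule) as a redzone, so
	 * out-of-bounds accesses into the reused tail are still caught.
	 */
	kasan_poison(p + round_up(new_size, KASAN_GRANULE_SIZE),
		     object_size - round_up(new_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_REDZONE);

	return p;
}
```

The efficiency gain claimed in the commit message is that only the repoisoning step was ever needed for the redzone; unpoisoning the whole object via ksize() first, as the old code did, was wasted work.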