@@ -1202,8 +1202,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
/* s390's use of memset() could override KASAN redzones. */
kasan_disable_current();
- for (i = 0; i < numpages; i++)
+ for (i = 0; i < numpages; i++) {
+ page_kasan_tag_reset(page + i);
clear_highpage(page + i);
+ }
kasan_enable_current();
}
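
With HW_TAGS KASAN the per-page tag is stored in page->flags, and clear_highpage() writes through page_address(), which bakes that tag into the pointer it returns. Resetting the page tag first makes the wipe go through a match-all pointer, so it is not checked against whatever memory tags the page's previous user left behind. A minimal userspace model of the idea (the bit position, mask, and names below are assumptions for illustration, not the kernel's implementation):

	#define TAG_MASK	0xffUL
	#define TAG_PGSHIFT	48	/* assumed position of the tag bits */
	#define TAG_KERNEL	0xffUL	/* match-all tag */

	struct model_page { unsigned long flags; };

	/* sketch of page_kasan_tag_reset(): install the match-all tag */
	static void model_page_tag_reset(struct model_page *page)
	{
		page->flags &= ~(TAG_MASK << TAG_PGSHIFT);
		page->flags |= TAG_KERNEL << TAG_PGSHIFT;
	}
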
@@ -25,7 +25,7 @@ static void poison_page(struct page *page)
/* KASAN still thinks the page is in-use, so skip it. */
kasan_disable_current();
- memset(addr, PAGE_POISON, PAGE_SIZE);
+ memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
kasan_enable_current();
kunmap_atomic(addr);
}
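
On arm64, resetting a pointer tag is plain top-byte arithmetic: with tagged addressing the tag sits in bits 63:56 of the address. A sketch of what kasan_reset_tag() amounts to under that assumed layout (the kernel's real helper is arch-specific):

	#include <stdint.h>

	#define PTR_TAG_MASK	(0xffULL << 56)	/* assumes 64-bit pointers */

	/* drop whatever tag the pointer carries and install the match-all
	 * tag 0xff, so the memset() of the poisoned page is never checked
	 * against a stale per-page or per-object tag */
	static inline void *reset_tag(const void *addr)
	{
		return (void *)(((uintptr_t)addr & ~PTR_TAG_MASK) | PTR_TAG_MASK);
	}
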
@@ -250,7 +250,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
/*
- * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+ * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
* Normally, this doesn't cause any issues, as both set_freepointer()
* and get_freepointer() are called with a pointer with the same tag.
* However, there are some issues with CONFIG_SLUB_DEBUG code. For
@@ -276,6 +276,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s,
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
+ object = kasan_reset_tag(object);
return freelist_dereference(s, object + s->offset);
}
@@ -305,6 +306,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif
+ freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
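
The getter and the setter have to agree on the address they use for the freelist slot: SLUB_DEBUG paths can hold differently tagged aliases of the same object, and the slot lives inside the object itself. A small userspace demonstration of that symmetry (set_tag(), reset_tag(), and the neutral 0x00 tag are assumptions of this model; the kernel's match-all tag is 0xff, and 64-bit pointers are assumed):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define TAG_SHIFT	56
	#define TAG_MASK	(0xffULL << TAG_SHIFT)

	static void *set_tag(void *p, uint8_t tag)
	{
		return (void *)(((uintptr_t)p & ~TAG_MASK) |
				((uint64_t)tag << TAG_SHIFT));
	}

	/* neutral tag is 0x00 here so the pointer stays dereferenceable
	 * in userspace; the kernel resets to 0xff instead */
	static void *reset_tag(void *p)
	{
		return (void *)((uintptr_t)p & ~TAG_MASK);
	}

	int main(void)
	{
		void *object = malloc(64);
		size_t offset = 0;			/* freelist slot at object start */
		void *alias_a = set_tag(object, 0x3);	/* fastpath's view */
		void *alias_b = set_tag(object, 0x7);	/* debug path's view */

		/* set_freepointer() through one alias ... */
		*(void **)((char *)reset_tag(alias_a) + offset) = object;
		/* ... get_freepointer() through the other finds the same slot */
		printf("%p\n", *(void **)((char *)reset_tag(alias_b) + offset));
		free(object);
		return 0;
	}
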
@@ -539,8 +541,8 @@ static void print_section(char *level, char *text, u8 *addr,
unsigned int length)
{
metadata_access_enable();
- print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
- length, 1);
+ print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+ 16, 1, kasan_reset_tag((void *)addr), length, 1);
metadata_access_disable();
}
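
The SLUB debug helpers from here on all share one pattern. metadata_access_enable() boils down to kasan_disable_current(), which only suppresses reports for generic and software tag-based KASAN; with HW_TAGS the tag check happens in hardware on every load and store and cannot be skipped per-task. That is why every metadata pointer inside the enable/disable window is additionally passed through kasan_reset_tag(). In print_section() the pointer that needs the reset is addr, the buffer being hex-dumped; text is only a printk prefix string. Schematically (touch() and meta are stand-ins, not kernel code):

	metadata_access_enable();	/* suppress software-side reports */
	touch(kasan_reset_tag(meta));	/* hardware side: match-all tag */
	metadata_access_disable();
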
@@ -571,7 +573,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
p = object + get_info_end(s);
- return p + alloc;
+ return kasan_reset_tag(p + alloc);
}
static void set_track(struct kmem_cache *s, void *object,
@@ -584,7 +586,8 @@ static void set_track(struct kmem_cache *s, void *object,
unsigned int nr_entries;
metadata_access_enable();
- nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
+ nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+ TRACK_ADDRS_COUNT, 3);
metadata_access_disable();
if (nr_entries < TRACK_ADDRS_COUNT)
@@ -748,7 +751,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
static void init_object(struct kmem_cache *s, void *object, u8 val)
{
- u8 *p = object;
+ u8 *p = kasan_reset_tag(object);
if (s->flags & SLAB_RED_ZONE)
memset(p - s->red_left_pad, val, s->red_left_pad);
@@ -778,7 +781,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
u8 *addr = page_address(page);
metadata_access_enable();
- fault = memchr_inv(start, value, bytes);
+ fault = memchr_inv(kasan_reset_tag(start), value, bytes);
metadata_access_disable();
if (!fault)
return 1;
@@ -874,7 +877,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
pad = end - remainder;
metadata_access_enable();
- fault = memchr_inv(pad, POISON_INUSE, remainder);
+ fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
metadata_access_disable();
if (!fault)
return 1;
@@ -1119,7 +1122,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
return;
metadata_access_enable();
- memset(addr, POISON_INUSE, page_size(page));
+ memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
metadata_access_disable();
}
@@ -1572,10 +1575,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
* Clear the object and the metadata, but don't touch
* the redzone.
*/
- memset(object, 0, s->object_size);
+ memset(kasan_reset_tag(object), 0, s->object_size);
rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
: 0;
- memset((char *)object + s->inuse, 0,
+ memset((char *)kasan_reset_tag(object) + s->inuse, 0,
s->size - s->inuse - rsize);
}
@@ -2891,10 +2894,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
stat(s, ALLOC_FASTPATH);
}
- maybe_wipe_obj_freeptr(s, object);
+ maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
- memset(object, 0, s->object_size);
+ memset(kasan_reset_tag(object), 0, s->object_size);
out:
slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
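
Note the asymmetry on the fastpath: the pointer handed back to the caller keeps its freshly assigned tag, which is precisely what makes later out-of-bounds and use-after-free accesses detectable, while the allocator's own writes, the freepointer wipe and the init_on_alloc memset(), go through the untagged alias of the same object.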