@@ -115,9 +115,6 @@ void __kasan_free_pages(struct page *page, unsigned int order)
*/
static inline unsigned int optimal_redzone(unsigned int object_size)
{
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC))
- return 0;
-
return
object_size <= 64 - 16 ? 16 :
object_size <= 128 - 32 ? 32 :
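The hunk ends mid-ladder: each rung pairs an object-size bracket with the largest redzone that still keeps object plus redzone within the next slab bucket, and the in-tree ladder continues with larger rungs. A hedged userspace sketch of the two rungs visible above (the helper and its fallthrough value are illustrative, not the kernel's function):

    #include <stdio.h>

    /* Demo only: mirrors the first two rungs of optimal_redzone(). */
    static unsigned int demo_redzone(unsigned int object_size)
    {
            return object_size <= 64 - 16 ? 16 :
                   object_size <= 128 - 32 ? 32 :
                   64; /* assumed fallthrough for the demo */
    }

    int main(void)
    {
            /* 48 + 16 fills a 64-byte slot, 96 + 32 fills a 128-byte slot. */
            printf("%u %u\n", demo_redzone(48), demo_redzone(96)); /* 16 32 */
            return 0;
    }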
@@ -131,47 +128,77 @@ static inline unsigned int optimal_redzone(unsigned int object_size)
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags)
{
- unsigned int orig_size = *size;
- unsigned int redzone_size;
- int redzone_adjust;
+ unsigned int ok_size;
+ unsigned int optimal_size;
+
+ /*
+ * SLAB_KASAN is used to mark caches as ones that are sanitized by
+ * KASAN. Currently this flag is used in two places:
+ * 1. In slab_ksize() when calculating the size of the accessible
+ * memory within the object.
+ * 2. In slab_common.c to prevent merging of sanitized caches.
+ */
+ *flags |= SLAB_KASAN;
- if (!kasan_stack_collection_enabled()) {
- *flags |= SLAB_KASAN;
+ if (!kasan_stack_collection_enabled())
return;
- }
- /* Add alloc meta. */
+ ok_size = *size;
+
+ /* Add alloc meta into redzone. */
cache->kasan_info.alloc_meta_offset = *size;
*size += sizeof(struct kasan_alloc_meta);
- /* Add free meta. */
- if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
- cache->object_size < sizeof(struct kasan_free_meta))) {
- cache->kasan_info.free_meta_offset = *size;
- *size += sizeof(struct kasan_free_meta);
+ /*
+ * If alloc meta doesn't fit, don't add it.
+ * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
+ * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
+ * larger sizes.
+ */
+ if (*size > KMALLOC_MAX_SIZE) {
+ cache->kasan_info.alloc_meta_offset = 0;
+ *size = ok_size;
+ /* Continue, since free meta might still fit. */
}
- redzone_size = optimal_redzone(cache->object_size);
- redzone_adjust = redzone_size - (*size - cache->object_size);
- if (redzone_adjust > 0)
- *size += redzone_adjust;
-
- *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
- max(*size, cache->object_size + redzone_size));
+ /* Only the generic mode uses free meta or flexible redzones. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+ return;
+ }
/*
- * If the metadata doesn't fit, don't enable KASAN at all.
+ * Add free meta into redzone when it's not possible to store
+ * it in the object. This is the case when:
+ * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
+ * be touched after it was freed, or
+ * 2. Object has a constructor, which means it's expected to
+ * retain its content until the next allocation, or
+ * 3. Object is too small.
+ * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
*/
- if (*size <= cache->kasan_info.alloc_meta_offset ||
- *size <= cache->kasan_info.free_meta_offset) {
- cache->kasan_info.alloc_meta_offset = 0;
- cache->kasan_info.free_meta_offset = 0;
- *size = orig_size;
- return;
+ if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+ cache->object_size < sizeof(struct kasan_free_meta)) {
+ ok_size = *size;
+
+ cache->kasan_info.free_meta_offset = *size;
+ *size += sizeof(struct kasan_free_meta);
+
+ /* If free meta doesn't fit, don't add it. */
+ if (*size > KMALLOC_MAX_SIZE) {
+ cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+ *size = ok_size;
+ }
}
- *flags |= SLAB_KASAN;
+ /* Calculate size with optimal redzone. */
+ optimal_size = cache->object_size + optimal_redzone(cache->object_size);
+ /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+ if (optimal_size > KMALLOC_MAX_SIZE)
+ optimal_size = KMALLOC_MAX_SIZE;
+ /* Use optimal size if the size with added metas is not large enough. */
+ if (*size < optimal_size)
+ *size = optimal_size;
}
size_t __kasan_metadata_size(struct kmem_cache *cache)
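Taken together, the rewritten __kasan_cache_create() appends alloc meta to the redzone, optionally appends free meta in generic mode, backs out whichever addition would overflow KMALLOC_MAX_SIZE, and finally grows the slot to the optimal redzone if the metas alone left it smaller. A hedged userspace walkthrough of the common path, with illustrative stand-ins for the metadata sizes (not the kernel's values):

    #include <stdio.h>

    #define DEMO_ALLOC_META 16 /* assumed sizeof(struct kasan_alloc_meta) */
    #define DEMO_FREE_META  16 /* assumed sizeof(struct kasan_free_meta) */

    int main(void)
    {
            unsigned int object_size = 24; /* demo cache with a constructor */
            unsigned int size = object_size;
            unsigned int alloc_off, free_off, optimal;

            /* Alloc meta always goes first into the redzone. */
            alloc_off = size;
            size += DEMO_ALLOC_META;

            /* The demo cache has a ctor, so free meta cannot overlay the
             * freed object and is appended to the redzone as well. */
            free_off = size;
            size += DEMO_FREE_META;

            /* Grow to the optimal redzone if the metas left it smaller. */
            optimal = object_size + 16; /* first rung of the ladder above */
            if (size < optimal)
                    size = optimal;

            /* Prints: alloc @24, free @40, size 56 */
            printf("alloc @%u, free @%u, size %u\n",
                   alloc_off, free_off, size);
            return 0;
    }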
@@ -187,15 +214,21 @@ size_t __kasan_metadata_size(struct kmem_cache *cache)
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object)
{
+ if (!cache->kasan_info.alloc_meta_offset)
+ return NULL;
return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object)
{
BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
+ if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
+ return NULL;
return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
+#endif
void __kasan_poison_slab(struct page *page)
{
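A note on the sentinels used by these accessors: alloc meta always lands at a nonzero offset (it is placed after the object by __kasan_cache_create() above, and the offset is reset to 0 when the meta is dropped), so 0 can double as "not present". Free meta, by contrast, may legitimately sit at offset 0 inside the freed object, which is why the generic mode needs the distinct KASAN_NO_FREE_META sentinel defined in the kasan.h hunk below. A hedged sketch of the two predicates (demo helpers over the kernel's struct kasan_cache fields):

    /* Demo: the two "metadata present?" checks use different sentinels. */
    static bool demo_has_alloc_meta(const struct kasan_cache *info)
    {
            return info->alloc_meta_offset != 0; /* 0 is never a valid offset */
    }

    static bool demo_has_free_meta(const struct kasan_cache *info)
    {
            return info->free_meta_offset != KASAN_NO_FREE_META; /* 0 is valid */
    }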
@@ -272,11 +305,9 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
struct kasan_alloc_meta *alloc_meta;
if (kasan_stack_collection_enabled()) {
- if (!(cache->flags & SLAB_KASAN))
- return (void *)object;
-
alloc_meta = kasan_get_alloc_meta(cache, object);
- __memset(alloc_meta, 0, sizeof(*alloc_meta));
+ if (alloc_meta)
+ __memset(alloc_meta, 0, sizeof(*alloc_meta));
}
/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
@@ -318,15 +349,12 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
if (!kasan_stack_collection_enabled())
return false;
- if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
- unlikely(!(cache->flags & SLAB_KASAN)))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
return false;
kasan_set_free_info(cache, object, tag);
- quarantine_put(cache, object);
-
- return IS_ENABLED(CONFIG_KASAN_GENERIC);
+ return quarantine_put(cache, object);
}
bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
@@ -359,7 +387,11 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
- kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
+ struct kasan_alloc_meta *alloc_meta;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (alloc_meta)
+ kasan_set_track(&alloc_meta->alloc_track, flags);
}
static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
@@ -389,7 +421,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
poison_range((void *)redzone_start, redzone_end - redzone_start,
KASAN_KMALLOC_REDZONE);
- if (kasan_stack_collection_enabled() && (cache->flags & SLAB_KASAN))
+ if (kasan_stack_collection_enabled())
set_alloc_info(cache, (void *)object, flags);
return set_tag(object, tag);
@@ -338,10 +338,10 @@ void kasan_record_aux_stack(void *addr)
cache = page->slab_cache;
object = nearest_obj(cache, page, addr);
alloc_meta = kasan_get_alloc_meta(cache, object);
-	/*
-	 * record the last two call_rcu() call stacks.
-	 */
+	if (!alloc_meta)
+		return;
+	/* Record the last two call_rcu() call stacks. */
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}
@@ -352,11 +352,11 @@ void kasan_set_free_info(struct kmem_cache *cache,
struct kasan_free_meta *free_meta;
free_meta = kasan_get_free_meta(cache, object);
- kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
+ if (!free_meta)
+ return;
- /*
- * the object was freed and has free track set
- */
+ kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
+ /* The object was freed and has free track set. */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}
@@ -365,5 +365,6 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
{
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
return NULL;
+ /* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
return &kasan_get_free_meta(cache, object)->free_track;
}
@@ -187,7 +187,8 @@ void kasan_set_free_info(struct kmem_cache *cache,
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
- kasan_set_track(&alloc_meta->free_track[0], GFP_NOWAIT);
+ if (alloc_meta)
+ kasan_set_track(&alloc_meta->free_track[0], GFP_NOWAIT);
}
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
@@ -196,5 +197,8 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return NULL;
+
return &alloc_meta->free_track[0];
}
@@ -156,20 +156,31 @@ struct kasan_alloc_meta {
struct qlist_node {
struct qlist_node *next;
};
+
+/*
+ * Generic mode either stores free meta in the object itself or in the redzone
+ * after the object. In the former case free meta offset is 0, in the latter
+ * case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
+ * offset when free meta isn't present.
+ */
+#define KASAN_NO_FREE_META INT_MAX
+
struct kasan_free_meta {
+#ifdef CONFIG_KASAN_GENERIC
/* This field is used while the object is in the quarantine.
* Otherwise it might be used for the allocator freelist.
*/
struct qlist_node quarantine_link;
-#ifdef CONFIG_KASAN_GENERIC
struct kasan_track free_track;
#endif
};
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object);
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
+#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
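free_meta_offset is therefore a tri-state. A hedged sketch of how a reader decodes it (demo helper; assumes the KASAN_NO_FREE_META define above is in scope):

    /* Demo: the three placements encoded in free_meta_offset. */
    static const char *demo_free_meta_placement(int off)
    {
            if (off == KASAN_NO_FREE_META)
                    return "absent: kasan_get_free_meta() returns NULL";
            if (off == 0)
                    return "overlaying the freed object itself";
            return "in the redzone after the object";
    }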
@@ -234,11 +245,11 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
#if defined(CONFIG_KASAN_GENERIC) && \
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
-void quarantine_put(struct kmem_cache *cache, void *object);
+bool quarantine_put(struct kmem_cache *cache, void *object);
void quarantine_reduce(void);
void quarantine_remove_cache(struct kmem_cache *cache);
#else
-static inline void quarantine_put(struct kmem_cache *cache, void *object) { }
+static inline bool quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void quarantine_reduce(void) { }
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
#endif
@@ -135,7 +135,12 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
if (IS_ENABLED(CONFIG_SLAB))
local_irq_save(flags);
+ /*
+	 * As the object now gets freed from the quarantine, assume that its
+ * free track is no longer valid.
+ */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;
+
___cache_free(cache, object, _THIS_IP_);
if (IS_ENABLED(CONFIG_SLAB))
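Together with kasan_set_free_info() and kasan_get_free_track() in the generic.c hunks above, the shadow byte acts as a small state machine for free-track validity. A hedged summary (the enum is illustrative; the marker names are the kernel's):

    /* Demo states of a freed generic-mode object's first shadow byte. */
    enum demo_freed_state {
            DEMO_QUARANTINED, /* KASAN_KMALLOC_FREETRACK: free meta valid */
            DEMO_RELEASED,    /* KASAN_KMALLOC_FREE: free meta may now be
                               * reused for the freelist, track is gone */
    };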
@@ -161,13 +166,20 @@ static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
qlist_init(q);
}
-void quarantine_put(struct kmem_cache *cache, void *object)
+bool quarantine_put(struct kmem_cache *cache, void *object)
{
unsigned long flags;
struct qlist_head *q;
struct qlist_head temp = QLIST_INIT;
struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
+ /*
+ * If there's no metadata for this object, don't put it into
+ * quarantine.
+ */
+ if (!meta)
+ return false;
+
/*
* Note: irq must be disabled until after we move the batch to the
* global quarantine. Otherwise quarantine_remove_cache() can miss
@@ -200,6 +212,8 @@ void quarantine_put(struct kmem_cache *cache, void *object)
}
local_irq_restore(flags);
+
+ return true;
}
void quarantine_reduce(void)
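The new return value is an ownership handshake with the slab free path: the object may be withheld from the allocator only when quarantine_put() actually queued it. A hedged sketch of the caller-side contract, mirroring the ____kasan_slab_free() tail reworked above:

    /* Demo: defer the real free only if the object was queued. */
    static bool demo_slab_free_tail(struct kmem_cache *cache, void *object,
                                    u8 tag)
    {
            kasan_set_free_info(cache, object, tag);
            /* false here means no free meta or quarantine compiled out;
             * the caller then lets the allocator free the object now. */
            return quarantine_put(cache, object);
    }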
@@ -168,32 +168,35 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
static void describe_object_stacks(struct kmem_cache *cache, void *object,
const void *addr, u8 tag)
{
- struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object);
-
- if (cache->flags & SLAB_KASAN) {
- struct kasan_track *free_track;
+ struct kasan_alloc_meta *alloc_meta;
+ struct kasan_track *free_track;
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (alloc_meta) {
print_track(&alloc_meta->alloc_track, "Allocated");
pr_err("\n");
- free_track = kasan_get_free_track(cache, object, tag);
- if (free_track) {
- print_track(free_track, "Freed");
- pr_err("\n");
- }
+ }
+
+ free_track = kasan_get_free_track(cache, object, tag);
+ if (free_track) {
+ print_track(free_track, "Freed");
+ pr_err("\n");
+ }
#ifdef CONFIG_KASAN_GENERIC
- if (alloc_meta->aux_stack[0]) {
- pr_err("Last call_rcu():\n");
- print_stack(alloc_meta->aux_stack[0]);
- pr_err("\n");
- }
- if (alloc_meta->aux_stack[1]) {
- pr_err("Second to last call_rcu():\n");
- print_stack(alloc_meta->aux_stack[1]);
- pr_err("\n");
- }
-#endif
+ if (!alloc_meta)
+ return;
+ if (alloc_meta->aux_stack[0]) {
+ pr_err("Last call_rcu():\n");
+ print_stack(alloc_meta->aux_stack[0]);
+ pr_err("\n");
}
+ if (alloc_meta->aux_stack[1]) {
+ pr_err("Second to last call_rcu():\n");
+ print_stack(alloc_meta->aux_stack[1]);
+ pr_err("\n");
+ }
+#endif
}
static void describe_object(struct kmem_cache *cache, void *object,
@@ -48,9 +48,12 @@ const char *get_bug_type(struct kasan_access_info *info)
object = nearest_obj(cache, page, (void *)addr);
alloc_meta = kasan_get_alloc_meta(cache, object);
- for (i = 0; i < KASAN_NR_FREE_STACKS; i++)
- if (alloc_meta->free_pointer_tag[i] == tag)
- return "use-after-free";
+ if (alloc_meta) {
+ for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
+ if (alloc_meta->free_pointer_tag[i] == tag)
+ return "use-after-free";
+ }
+ }
return "out-of-bounds";
}
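For context: software tag-based KASAN records the pointer tags of the last few frees of each object, and an access whose tag matches one of them is classified as a use-after-free rather than an out-of-bounds. A hedged userspace sketch of the heuristic (the size and types are demo stand-ins for KASAN_NR_FREE_STACKS and the kernel's tag arrays):

    #include <stdbool.h>

    #define DEMO_NR_FREE_STACKS 5 /* stand-in for KASAN_NR_FREE_STACKS */

    /* Demo: match the faulting pointer's tag against recorded free tags. */
    static bool demo_is_use_after_free(const unsigned char *free_pointer_tag,
                                       unsigned char access_tag)
    {
            int i;

            for (i = 0; i < DEMO_NR_FREE_STACKS; i++) {
                    if (free_pointer_tag[i] == access_tag)
                            return true;
            }
            return false;
    }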
@@ -170,6 +170,8 @@ void kasan_set_free_info(struct kmem_cache *cache,
u8 idx = 0;
alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return;
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
idx = alloc_meta->free_track_idx;
@@ -187,6 +189,8 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
int i = 0;
alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return NULL;
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {