@@ -88,13 +88,10 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
}
#endif /* CONFIG_KASAN_STACK */
-/*
- * Only allow cache merging when stack collection is disabled and no metadata
- * is present.
- */
+/* Only allow cache merging when no per-object metadata is present. */
slab_flags_t __kasan_never_merge(void)
{
- if (kasan_stack_collection_enabled())
+ if (kasan_requires_meta())
return SLAB_KASAN;
return 0;
}
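
With the hunk above, only a configuration that keeps per-object metadata still reports SLAB_KASAN as a never-merge flag; the tag-based modes now return 0, so as far as KASAN is concerned their caches become eligible for merging. A minimal stand-alone sketch of that decision (plain user-space C; the flag value, macro, and names are stand-ins, not the kernel definitions):

	#include <stdio.h>

	/* Stand-in values for illustration only; not the kernel's definitions. */
	typedef unsigned int slab_flags_model_t;
	#define SLAB_KASAN_MODEL 0x08000000U

	#ifdef CONFIG_KASAN_GENERIC_MODEL
	/* Generic KASAN: per-object metadata is present, so merging must stay off. */
	static int requires_meta_model(void) { return 1; }
	#else
	/* Tag-based modes: no per-object metadata, merging is fine. */
	static int requires_meta_model(void) { return 0; }
	#endif

	/* Mirrors the shape of __kasan_never_merge() after the change above. */
	static slab_flags_model_t never_merge_model(void)
	{
		if (requires_meta_model())
			return SLAB_KASAN_MODEL;
		return 0;
	}

	int main(void)
	{
		printf("never-merge flags: %#x\n", never_merge_model());
		return 0;
	}
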
@@ -152,7 +149,7 @@ void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
*/
*flags |= SLAB_KASAN;
- if (!kasan_stack_collection_enabled())
+ if (!kasan_requires_meta())
return;
ok_size = *size;
@@ -220,7 +217,7 @@ void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
- if (!kasan_stack_collection_enabled())
+ if (!kasan_requires_meta())
return 0;
return (cache->kasan_info.alloc_meta_offset ?
sizeof(struct kasan_alloc_meta) : 0) +
@@ -295,7 +292,7 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
/* Initialize per-object metadata if it is present. */
- if (kasan_stack_collection_enabled())
+ if (kasan_requires_meta())
kasan_init_object_meta(cache, object);
/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
@@ -43,7 +43,7 @@ static inline bool kasan_sync_fault_possible(void)
return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}
-#else
+#else /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_stack_collection_enabled(void)
{
@@ -60,7 +60,31 @@ static inline bool kasan_sync_fault_possible(void)
return true;
}
-#endif
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_GENERIC
+
+/* Generic KASAN uses per-object metadata to store stack traces. */
+static inline bool kasan_requires_meta(void)
+{
+ /*
+ * Technically, Generic KASAN always collects stack traces right now.
+ * However, let's use kasan_stack_collection_enabled() in case the
+ * kasan.stacktrace command-line argument is changed to affect
+ * Generic KASAN.
+ */
+ return kasan_stack_collection_enabled();
+}
+
+#else /* CONFIG_KASAN_GENERIC */
+
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline bool kasan_requires_meta(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_KASAN_GENERIC */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
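
Taken together, the definitions above separate "does this mode collect stack traces?" from "does this mode keep per-object metadata?". Below is a stand-alone model of that split (illustrative only; the enum and helpers are placeholders, not kernel identifiers), assuming HW_TAGS gates collection on its kasan.stacktrace parameter, as the comment above implies, while the other modes always collect:

	#include <stdbool.h>
	#include <stdio.h>

	/* Placeholder model of the KASAN modes; not kernel identifiers. */
	enum mode_model { MODE_GENERIC, MODE_SW_TAGS, MODE_HW_TAGS };

	static bool stack_collection_enabled_model(enum mode_model mode, bool stacktrace_param)
	{
		/* Only HW_TAGS has a runtime switch; the other modes always collect. */
		return mode == MODE_HW_TAGS ? stacktrace_param : true;
	}

	static bool requires_meta_model(enum mode_model mode, bool stacktrace_param)
	{
		/* Only Generic KASAN stores its stack traces in per-object metadata. */
		return mode == MODE_GENERIC &&
		       stack_collection_enabled_model(mode, stacktrace_param);
	}

	int main(void)
	{
		printf("generic: meta=%d\n", requires_meta_model(MODE_GENERIC, true));
		printf("sw_tags: meta=%d\n", requires_meta_model(MODE_SW_TAGS, true));
		printf("hw_tags (stacktrace on): meta=%d\n",
		       requires_meta_model(MODE_HW_TAGS, true));
		return 0;
	}
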
@@ -272,13 +296,14 @@ void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report
struct page *kasan_addr_to_page(const void *addr);
struct slab *kasan_addr_to_slab(const void *addr);
-void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
-
#ifdef CONFIG_KASAN_GENERIC
+void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object);
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
+#else /* CONFIG_KASAN_GENERIC */
+static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
#endif
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
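
The hunk above also moves the kasan_init_object_meta() declaration under CONFIG_KASAN_GENERIC and adds an empty static inline stub for the other configurations, which is what lets the empty out-of-line definition be dropped in the next hunk. The same declare-or-stub idiom in a self-contained form (placeholder names and feature switch, not kernel identifiers):

	#include <stdio.h>

	/* Placeholder feature switch and names; not kernel identifiers. */
	#ifdef FEATURE_META
	void init_object_meta(void *object); /* real definition lives in one .c file */
	#else
	static inline void init_object_meta(void *object) { (void)object; } /* no-op stub */
	#endif

	int main(void)
	{
		int object = 0;

		/* Call sites stay free of #ifdefs; the stub compiles away when unused. */
		init_object_meta(&object);
		printf("ok\n");
		return 0;
	}
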
@@ -17,10 +17,6 @@
#include "kasan.h"
-void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
-{
-}
-
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
}