--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -96,15 +96,6 @@ static inline bool kasan_has_integrated_init(void)
}

#ifdef CONFIG_KASAN
-
-struct kasan_cache {
-#ifdef CONFIG_KASAN_GENERIC
- int alloc_meta_offset;
- int free_meta_offset;
-#endif
- bool is_kmalloc;
-};
-
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -129,13 +120,6 @@ static __always_inline bool kasan_unpoison_pages(struct page *page,
return false;
}

-void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
-static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
- if (kasan_enabled())
- __kasan_cache_create_kmalloc(cache);
-}
-
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
@@ -255,7 +239,6 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
{
return false;
}
-static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
@@ -306,6 +289,11 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
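[ Note: after the hunk above, struct kasan_cache is down to the two
  metadata offsets, which only the Generic mode lookup helpers consume.
  A minimal sketch of such a consumer, loosely modeled on
  kasan_get_alloc_meta() in mm/kasan/generic.c (illustrative, not
  verbatim from this tree): ]

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	/* An offset of 0 means no alloc metadata is reserved for this cache. */
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}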
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -72,7 +72,7 @@ struct kmem_cache {
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache kasan_info;
#endif

--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -136,7 +136,7 @@ struct kmem_cache {
unsigned int *random_seq;
#endif

-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache kasan_info;
#endif

--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -122,11 +122,6 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
KASAN_PAGE_FREE, init);
}

-void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
- cache->kasan_info.is_kmalloc = true;
-}
-
void __kasan_poison_slab(struct slab *slab)
{
struct page *page = slab_page(slab);
@@ -326,7 +321,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
kasan_unpoison(tagged_object, cache->object_size, init);

/* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+ if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, tagged_object, flags);

return tagged_object;
@@ -372,7 +367,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
* Save alloc info (if possible) for kmalloc() allocations.
* This also rewrites the alloc info when called from kasan_krealloc().
*/
- if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+ if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, (void *)object, flags);

/* Keep the tag that was set by kasan_slab_alloc(). */
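[ Note: both conversions above depend on is_kmalloc_cache(). A minimal
  sketch of that helper, assuming the mm/slab.h definition introduced
  earlier in this series; the SLAB_KMALLOC flag test is the point, the
  exact form may differ: ]

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	/*
	 * kmalloc caches are created with SLAB_KMALLOC set; see the
	 * create_kmalloc_cache() hunk below.
	 */
	return (s->flags & SLAB_KMALLOC);
}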
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -670,7 +670,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,

create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
usersize);
- kasan_cache_create_kmalloc(s);
list_add(&s->list, &slab_caches);
s->refcount = 1;
return s;
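[ Note: the call removed above is what used to set is_kmalloc. It is
  redundant once the flag drives the check: create_boot_cache() stores
  the passed-in flags, including SLAB_KMALLOC, in s->flags, which is
  exactly what is_kmalloc_cache() tests. A hypothetical sanity check
  illustrating the invariant (check_kmalloc_flag() is made up for this
  note, not part of the patch): ]

static void __init check_kmalloc_flag(struct kmem_cache *s)
{
	/* Every cache built by create_kmalloc_cache() must carry the flag. */
	WARN_ON(!is_kmalloc_cache(s));
}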