@@ -112,6 +112,8 @@ enum kfence_counter_id {
KFENCE_COUNTER_FREES,
KFENCE_COUNTER_ZOMBIES,
KFENCE_COUNTER_BUGS,
+ KFENCE_COUNTER_SKIP_INCOMPAT,
+ KFENCE_COUNTER_SKIP_CAPACITY,
KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
@@ -121,6 +123,8 @@ static const char *const counter_names[]
[KFENCE_COUNTER_FREES] = "total frees",
[KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
[KFENCE_COUNTER_BUGS] = "total bugs",
+ [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)",
+ [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
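These first two hunks extend an existing pattern: one enum indexes both the atomic counter array and a parallel table of human-readable names, and the static_assert forces the two to stay in lockstep at compile time. A minimal standalone sketch of the same pattern (all names below are illustrative, not from the kernel):

    #include <stdio.h>

    enum stat_id {
            STAT_HITS,
            STAT_MISSES,
            STAT_COUNT,     /* keep last: doubles as the array length */
    };

    static long stats[STAT_COUNT];

    static const char *const stat_names[] = {
            [STAT_HITS]   = "hits",
            [STAT_MISSES] = "misses",
    };

    /* Trips if a counter is appended to the enum without a matching name. */
    _Static_assert(sizeof(stat_names) / sizeof(stat_names[0]) == STAT_COUNT,
                   "stat_names out of sync with enum stat_id");

    int main(void)
    {
            stats[STAT_MISSES]++;
            for (int i = 0; i < STAT_COUNT; i++)
                    printf("%s: %ld\n", stat_names[i], stats[i]);
            return 0;
    }

Because the initializers are keyed by enum value, the table's order is irrelevant; the assert catches the common failure of appending a counter without a name, though a NULL hole in the middle of the table would still slip through.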
@@ -271,8 +275,10 @@ static void *kfence_guarded_alloc(struct
list_del_init(&meta->list);
}
raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
- if (!meta)
+ if (!meta) {
+ atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
return NULL;
+ }

if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
/*
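Returning NULL here is not an allocation failure: the slab layer treats NULL from the KFENCE entry point as "not sampled this time" and falls back to a regular slab object, so the new counter measures sampling opportunities lost to pool exhaustion. A runnable userspace model of this hunk's shape, i.e. pop a slot under a lock and count a capacity skip when the freelist is empty (plain C with invented names standing in for the kfence_metadata freelist and its raw spinlock):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define POOL_SIZE 2

    static int pool[POOL_SIZE];                  /* stand-in for kfence_metadata */
    static int free_top = POOL_SIZE;             /* stack of free slots */
    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_long skip_capacity;            /* models SKIP_CAPACITY */

    static int *pool_alloc(void)
    {
            int *slot = NULL;

            pthread_mutex_lock(&pool_lock);
            if (free_top > 0)
                    slot = &pool[--free_top];
            pthread_mutex_unlock(&pool_lock);

            if (!slot)                           /* pool exhausted: count the skip */
                    atomic_fetch_add(&skip_capacity, 1);
            return slot;                         /* NULL => caller falls back */
    }

    int main(void)
    {
            for (int i = 0; i < 4; i++)          /* 2 succeed, 2 are skipped */
                    printf("alloc %d -> %p\n", i, (void *)pool_alloc());
            printf("capacity skips: %ld\n", atomic_load(&skip_capacity));
            return 0;
    }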
@@ -740,8 +746,10 @@ void *__kfence_alloc(struct kmem_cache *
* Perform size check before switching kfence_allocation_gate, so that
* we don't disable KFENCE without making an allocation.
*/
- if (size > PAGE_SIZE)
+ if (size > PAGE_SIZE) {
+ atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
return NULL;
+ }

/*
* Skip allocations from non-default zones, including DMA. We cannot
@@ -749,8 +757,10 @@ void *__kfence_alloc(struct kmem_cache *
* properties (e.g. reside in DMAable memory).
*/
if ((flags & GFP_ZONEMASK) ||
- (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+ (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
+ atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
return NULL;
+ }

/*
* allocation_gate only needs to become non-zero, so it doesn't make
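Note that neither counter needs any debugfs plumbing: KFENCE's stats file iterates the whole counter_names table, so the two new entries show up in /sys/kernel/debug/kfence/stats automatically. The relevant seq_file handler looks roughly like this (a simplified rendering of stats_show() in mm/kfence/core.c):

    static int stats_show(struct seq_file *seq, void *v)
    {
            int i;

            seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
            for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
                    seq_printf(seq, "%s: %ld\n", counter_names[i],
                               atomic_long_read(&counters[i]));
            return 0;
    }

With the patch applied, the file gains "skipped allocations (incompatible)" and "skipped allocations (capacity)" lines; a steadily growing capacity count means samples are arriving while every object in the KFENCE pool is already in use.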