@@ -3236,7 +3236,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
init = slab_want_init_on_alloc(flags, cachep);
out_hooks:
- slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
+ slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init, 0);
return ptr;
}
@@ -3299,7 +3299,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
init = slab_want_init_on_alloc(flags, cachep);
out:
- slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
+ slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init, 0);
return objp;
}
@@ -3546,13 +3546,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
* Done outside of the IRQ disabled section.
*/
slab_post_alloc_hook(s, objcg, flags, size, p,
- slab_want_init_on_alloc(flags, s));
+ slab_want_init_on_alloc(flags, s), 0);
/* FIXME: Trace call missing. Christoph would like a bulk variant */
return size;
error:
local_irq_enable();
cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
- slab_post_alloc_hook(s, objcg, flags, i, p, false);
+ slab_post_alloc_hook(s, objcg, flags, i, p, false, 0);
kmem_cache_free_bulk(s, i, p);
return 0;
}
@@ -710,12 +710,17 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
static inline void slab_post_alloc_hook(struct kmem_cache *s,
struct obj_cgroup *objcg, gfp_t flags,
- size_t size, void **p, bool init)
+ size_t size, void **p, bool init,
+ unsigned int orig_size)
{
size_t i;
flags &= gfp_allowed_mask;
+ /* If the original (kmalloc) request size is not set, use object_size */
+ if (!orig_size)
+ orig_size = s->object_size;
+
/*
* As memory initialization might be integrated into KASAN,
* kasan_slab_alloc and initialization memset must be
@@ -726,7 +731,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
for (i = 0; i < size; i++) {
p[i] = kasan_slab_alloc(s, p[i], flags, init);
if (p[i] && init && !kasan_has_integrated_init())
- memset(p[i], 0, s->object_size);
+ memset(p[i], 0, orig_size);
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, flags);
}
@@ -3285,7 +3285,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
init = slab_want_init_on_alloc(gfpflags, s);
out:
- slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
+ slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
return object;
}
@@ -3778,11 +3778,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
* Done outside of the IRQ disabled fastpath loop.
*/
slab_post_alloc_hook(s, objcg, flags, size, p,
- slab_want_init_on_alloc(flags, s));
+ slab_want_init_on_alloc(flags, s), 0);
return i;
error:
slub_put_cpu_ptr(s->cpu_slab);
- slab_post_alloc_hook(s, objcg, flags, i, p, false);
+ slab_post_alloc_hook(s, objcg, flags, i, p, false, 0);
kmem_cache_free_bulk(s, i, p);
return 0;
}
kzalloc/kmalloc will round up the request size to a fixed size (mostly a power of 2), so the allocated memory could be more than requested. Currently the kzalloc family of APIs zeroes all of the allocated memory. To detect out-of-bounds usage of the extra allocated memory, zero only the requested part, so that sanity checks can be added for the extra space later.

For kzalloc users who will call ksize() later and utilize this extra space, please be aware that the space is no longer zeroed.

Signed-off-by: Feng Tang <feng.tang@intel.com>
---
 mm/slab.c | 8 ++++----
 mm/slab.h | 9 +++++++--
 mm/slub.c | 6 +++---
 3 files changed, 14 insertions(+), 9 deletions(-)
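
For reference, here is a minimal user-space sketch (not kernel code; names such as toy_round_up and toy_kzalloc are hypothetical) of the behaviour change described above: the allocation still comes from the rounded-up size class, but only the originally requested bytes are zeroed.

/*
 * Illustrative sketch of kmalloc-style size-class rounding and the
 * partial zeroing introduced by this patch. Hypothetical names only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Round up to the next power of two, minimum 8 bytes. */
static size_t toy_round_up(size_t size)
{
	size_t bucket = 8;

	while (bucket < size)
		bucket <<= 1;
	return bucket;
}

/* Allocate a full size-class bucket, but zero only @orig_size bytes. */
static void *toy_kzalloc(size_t orig_size)
{
	size_t bucket = toy_round_up(orig_size);
	void *p = malloc(bucket);

	if (p)
		memset(p, 0, orig_size);	/* was: memset(p, 0, bucket) */
	return p;
}

int main(void)
{
	size_t req = 52;	/* rounds up to a 64-byte bucket */

	printf("requested %zu, bucket %zu, zeroed %zu\n",
	       req, toy_round_up(req), req);
	free(toy_kzalloc(req));
	return 0;
}

As the changelog notes, checks on the unzeroed tail are left for a later patch; callers that rely on ksize() to use that tail must now treat it as uninitialized.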