@@ -195,8 +195,9 @@ void kfree(const void *);
void kfree_sensitive(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
-void *kmem_cache_last_alloc(struct kmem_cache *s, void *object);
+void *kmem_cache_last_alloc(struct kmem_cache *s, void *object, void **stackp, int nstackp);
void *kmem_last_alloc(void *object);
+void *kmem_last_alloc_stack(void *object, void **stackp, int nstackp);
const char *kmem_last_alloc_errstring(void *lastalloc);
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
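
As a purely illustrative sketch (not part of this patch), a debugging caller of the new interface might look as follows. The report_last_alloc() name, the 16-entry stack depth, and the assumption that kmem_last_alloc_errstring() decodes the ERR_PTR() values returned by these functions are all made up for the example:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical caller-side sketch; not part of this patch. */
static void report_last_alloc(void *object)
{
	void *stack[16];	/* depth chosen arbitrarily for the example */
	void *ret = kmem_last_alloc_stack(object, stack, ARRAY_SIZE(stack));
	int i;

	if (IS_ERR(ret)) {
		/* Assumes kmem_last_alloc_errstring() decodes the ERR_PTR() value. */
		pr_info("last-alloc unavailable: %s\n",
			kmem_last_alloc_errstring(ret));
		return;
	}
	if (!ret) {
		pr_info("last-alloc: no debugging information recorded\n");
		return;
	}
	pr_info("allocated from %pS\n", ret);
	/* Stack is %NULL-terminated when shorter than the array passed in. */
	for (i = 0; i < ARRAY_SIZE(stack) && stack[i]; i++)
		pr_info("  %pS\n", stack[i]);
}

The loop bounds itself both by the @nstackp value it passed in and by the %NULL terminator, matching the convention used by the allocator-specific implementations below.
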
@@ -3602,25 +3602,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif
-void *kmem_cache_last_alloc(struct kmem_cache *cachep, void *object)
-{
-#ifdef DEBUG
- unsigned int objnr;
- void *objp;
- struct page *page;
-
- if (!(cachep->flags & SLAB_STORE_USER))
- return ERR_PTR(-KMEM_LA_NO_DEBUG);
- objp = object - obj_offset(cachep);
- page = virt_to_head_page(objp);
- objnr = obj_to_index(cachep, page, objp);
- objp = index_to_obj(cachep, page, objnr);
- return *dbg_userword(cachep, objp);
-#else
- return NULL;
-#endif
-}
-
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
@@ -3652,6 +3633,27 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */
+void *kmem_cache_last_alloc(struct kmem_cache *cachep, void *object, void **stackp, int nstackp)
+{
+#ifdef DEBUG
+ unsigned int objnr;
+ void *objp;
+ struct page *page;
+
+ if (!(cachep->flags & SLAB_STORE_USER))
+ return ERR_PTR(-KMEM_LA_NO_DEBUG);
+ objp = object - obj_offset(cachep);
+ page = virt_to_head_page(objp);
+ objnr = obj_to_index(cachep, page, objp);
+ objp = index_to_obj(cachep, page, objnr);
+ if (stackp && nstackp)
+ stackp[0] = NULL;
+ return *dbg_userword(cachep, objp);
+#else
+ return NULL;
+#endif
+}
+
/**
* __do_kmalloc - allocate memory
* @size: how many bytes of memory are required.
@@ -560,14 +560,22 @@ const char *kmem_last_alloc_errstring(void *lastalloc)
}
EXPORT_SYMBOL_GPL(kmem_last_alloc_errstring);
-/*
+/**
+ * kmem_last_alloc_stack - Get return address and stack for last allocation
+ * @object: object for which to find last-allocation return address.
+ * @stackp: %NULL or pointer to an array to hold the return-address stack.
+ * @nstackp: maximum number of return addresses that may be stored.
+ *
* If the pointer references a slab-allocated object and if sufficient
- * debugging is enabled, return the returrn address for the corresponding
- * allocation. Otherwise, return NULL. Note that passing random pointers
- * to this function (including addresses of on-stack variables) is likely
- * to result in panics.
+ * debugging is enabled, return the return address for the corresponding
+ * allocation. If @stackp is non-%NULL in %CONFIG_STACKTRACE kernels built
+ * with the SLUB allocator, also copy the return-address stack into @stackp,
+ * limited by @nstackp. Otherwise, return %NULL or an appropriate error
+ * code using %ERR_PTR().
+ *
+ * Return: return address from last allocation, %NULL, or ERR_PTR()-encoded error.
*/
-void *kmem_last_alloc(void *object)
+void *kmem_last_alloc_stack(void *object, void **stackp, int nstackp)
{
struct page *page;
@@ -576,7 +584,24 @@ void *kmem_last_alloc(void *object)
page = virt_to_head_page(object);
if (!PageSlab(page))
return ERR_PTR(-KMEM_LA_NO_SLAB);
- return kmem_cache_last_alloc(page->slab_cache, object);
+ return kmem_cache_last_alloc(page->slab_cache, object, stackp, nstackp);
+}
+EXPORT_SYMBOL_GPL(kmem_last_alloc_stack);
+
+/**
+ * kmem_last_alloc - Get return address for last allocation
+ * @object: object for which to find last-allocation return address.
+ *
+ * If the pointer references a slab-allocated object and if sufficient
+ * debugging is enabled, return the return address for the corresponding
+ * allocation. Otherwise, return %NULL or an appropriate error code using
+ * %ERR_PTR().
+ *
+ * Return: return address from last allocation, %NULL, or ERR_PTR()-encoded error.
+ */
+void *kmem_last_alloc(void *object)
+{
+ return kmem_last_alloc_stack(object, NULL, 0);
}
EXPORT_SYMBOL_GPL(kmem_last_alloc);
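
For completeness, a hypothetical helper (alloc_site_name() and its buffer handling are invented for this sketch; headers as in the earlier example) shows how a caller might fold the shared address/%NULL/ERR_PTR() return convention into a printable string:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical decoder for the NULL/ERR_PTR/address return convention. */
static const char *alloc_site_name(void *object, char *buf, size_t len)
{
	void *ret = kmem_last_alloc(object);

	if (IS_ERR(ret))
		return kmem_last_alloc_errstring(ret);	/* assumed to decode ERR_PTR() */
	if (!ret)
		return "(no allocation-site information)";
	snprintf(buf, len, "%pS", ret);	/* format the return address symbolically */
	return buf;
}
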
@@ -461,8 +461,10 @@ static void slob_free(void *block, int size)
spin_unlock_irqrestore(&slob_lock, flags);
}
-void *kmem_cache_last_alloc(struct kmem_cache *s, void *object)
+void *kmem_cache_last_alloc(struct kmem_cache *s, void *object, void **stackp, int nstackp)
{
+ if (stackp && nstackp)
+ stackp[0] = NULL;
return ERR_PTR(-KMEM_LA_SLOB);
}
@@ -3918,10 +3918,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
return 0;
}
-void *kmem_cache_last_alloc(struct kmem_cache *s, void *object)
+void *kmem_cache_last_alloc(struct kmem_cache *s, void *object, void **stackp, int nstackp)
{
#ifdef CONFIG_SLUB_DEBUG
void *base;
+ int i = 0;
unsigned int objnr;
void *objp;
struct page *page;
@@ -3938,6 +3939,17 @@ void *kmem_cache_last_alloc(struct kmem_cache *s, void *object)
if (objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size)
return ERR_PTR(-KMEM_LA_INCONSISTENT);
trackp = get_track(s, objp, TRACK_ALLOC);
+#ifdef CONFIG_STACKTRACE
+ if (stackp) {
+ for (; i < nstackp && i < TRACK_ADDRS_COUNT; i++) {
+ stackp[i] = (void *)trackp->addrs[i];
+ if (!stackp[i])
+ break;
+ }
+ }
+#endif
+ if (stackp && i < nstackp)
+ stackp[i] = NULL;
return (void *)trackp->addr;
#else
return NULL;