diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1397,7 +1397,8 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
}
#endif
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+ struct mem_cgroup **memcgp);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
#ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3023,7 +3023,8 @@ static inline bool memcg_kmem_bypass(void)
* done with it, memcg_kmem_put_cache() must be called to release the
* reference.
*/
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+ struct mem_cgroup **memcgp)
{
struct mem_cgroup *memcg;
struct kmem_cache *memcg_cachep;
@@ -3079,8 +3080,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
*/
if (unlikely(!memcg_cachep))
memcg_schedule_kmem_cache_create(memcg, cachep);
- else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
+ else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) {
+ css_get(&memcg->css);
+ *memcgp = memcg;
cachep = memcg_cachep;
+ }
out_unlock:
rcu_read_unlock();
return cachep;
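
The hunk above changes the contract of memcg_kmem_get_cache(): for an
accounted allocation from a non-root cache it now returns the cgroup,
with a css reference held, through *memcgp, in addition to taking the
percpu reference on the memcg cache. A condensed sketch of how a caller
is expected to pair this with the release path follows; alloc_from() is
a hypothetical stand-in for the allocator fastpath, everything else is
named as in this series:

    static void *accounted_alloc_sketch(struct kmem_cache *cachep, gfp_t flags)
    {
    	struct mem_cgroup *memcg = NULL;
    	struct kmem_cache *s;
    	void *obj;

    	/* Takes a percpu ref on the memcg cache and a css ref on the
    	 * cgroup; the latter is returned through &memcg. For a root
    	 * allocation s == cachep and memcg stays NULL. */
    	s = memcg_kmem_get_cache(cachep, &memcg);

    	obj = alloc_from(s, flags);	/* hypothetical fastpath */

    	/* Stores a referenced memcg_ptr for the object and drops both
    	 * references taken above (see the mm/slab.h hunks below). */
    	if (!is_root_cache(s))
    		memcg_slab_post_alloc_hook(s, memcg, 1, &obj);
    	return obj;
    }
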
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3223,9 +3223,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags;
void *ptr;
int slab_node = numa_mem_id();
+ struct mem_cgroup *memcg = NULL;
flags &= gfp_allowed_mask;
- cachep = slab_pre_alloc_hook(cachep, flags);
+ cachep = slab_pre_alloc_hook(cachep, &memcg, 1, flags);
if (unlikely(!cachep))
return NULL;
@@ -3261,7 +3262,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
memset(ptr, 0, cachep->object_size);
- slab_post_alloc_hook(cachep, flags, 1, &ptr);
+ slab_post_alloc_hook(cachep, memcg, flags, 1, &ptr);
return ptr;
}
@@ -3302,9 +3303,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
unsigned long save_flags;
void *objp;
+ struct mem_cgroup *memcg = NULL;
flags &= gfp_allowed_mask;
- cachep = slab_pre_alloc_hook(cachep, flags);
+ cachep = slab_pre_alloc_hook(cachep, &memcg, 1, flags);
if (unlikely(!cachep))
return NULL;
@@ -3318,7 +3320,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
memset(objp, 0, cachep->object_size);
- slab_post_alloc_hook(cachep, flags, 1, &objp);
+ slab_post_alloc_hook(cachep, memcg, flags, 1, &objp);
return objp;
}
@@ -3440,6 +3442,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
memset(objp, 0, cachep->object_size);
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
+ memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
/*
* Skip calling cache_free_alien() when the platform is not numa.
@@ -3505,8 +3508,9 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{
size_t i;
+ struct mem_cgroup *memcg = NULL;
- s = slab_pre_alloc_hook(s, flags);
+ s = slab_pre_alloc_hook(s, &memcg, size, flags);
if (!s)
return 0;
@@ -3529,13 +3533,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
for (i = 0; i < size; i++)
memset(p[i], 0, s->object_size);
- slab_post_alloc_hook(s, flags, size, p);
+ slab_post_alloc_hook(s, memcg, flags, size, p);
/* FIXME: Trace call missing. Christoph would like a bulk variant */
return size;
error:
local_irq_enable();
cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
- slab_post_alloc_hook(s, flags, i, p);
+ slab_post_alloc_hook(s, memcg, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;
}
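
Note the ordering in the bulk error path above: slab_post_alloc_hook()
runs with the partial count i before __kmem_cache_free_bulk(), so each
of the i successfully allocated objects first gets its memcg_ptr stored
(and the css and kmem_cache references are dropped), and the bulk free
then releases the per-object references via memcg_slab_free_hook() in
___cache_free().
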
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -423,6 +423,45 @@ static inline void memcg_free_page_memcg_vec(struct page *page)
page->mem_cgroup_vec = NULL;
}
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct mem_cgroup *memcg,
+ size_t size, void **p)
+{
+ struct mem_cgroup_ptr *memcg_ptr;
+ struct page *page;
+ unsigned long off;
+ size_t i;
+
+ memcg_ptr = mem_cgroup_get_kmem_ptr(memcg);
+ for (i = 0; i < size; i++) {
+ if (likely(p[i])) {
+ page = virt_to_head_page(p[i]);
+ off = obj_to_index(s, page, p[i]);
+ mem_cgroup_ptr_get(memcg_ptr);
+ page->mem_cgroup_vec[off] = memcg_ptr;
+ }
+ }
+ mem_cgroup_ptr_put(memcg_ptr);
+ mem_cgroup_put(memcg);
+
+ memcg_kmem_put_cache(s);
+}
+
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+ void *p)
+{
+ struct mem_cgroup_ptr *memcg_ptr;
+ unsigned int off;
+
+ if (!memcg_kmem_enabled() || is_root_cache(s))
+ return;
+
+ off = obj_to_index(s, page, p);
+ memcg_ptr = page->mem_cgroup_vec[off];
+ page->mem_cgroup_vec[off] = NULL;
+ mem_cgroup_ptr_put(memcg_ptr);
+}
+
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
@@ -482,6 +521,17 @@ static inline void memcg_free_page_memcg_vec(struct page *page)
{
}
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct mem_cgroup *memcg,
+ size_t size, void **p)
+{
+}
+
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+ void *p)
+{
+}
+
static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
@@ -591,7 +641,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
}
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
- gfp_t flags)
+ struct mem_cgroup **memcgp,
+ size_t size, gfp_t flags)
{
flags &= gfp_allowed_mask;
@@ -605,13 +656,14 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
if (memcg_kmem_enabled() &&
((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
- return memcg_kmem_get_cache(s);
+ return memcg_kmem_get_cache(s, memcgp);
return s;
}
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+ struct mem_cgroup *memcg,
+ gfp_t flags, size_t size, void **p)
{
size_t i;
@@ -623,8 +675,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
s->flags, flags);
}
- if (memcg_kmem_enabled())
- memcg_kmem_put_cache(s);
+ if (!is_root_cache(s))
+ memcg_slab_post_alloc_hook(s, memcg, size, p);
}
#ifndef CONFIG_SLOB
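
With the hooks above in place, every live non-root object carries a
back-pointer to its cgroup in the head page's mem_cgroup_vec. For
illustration only — this helper is hypothetical, but obj_to_index() and
page->mem_cgroup_vec are introduced earlier in the series — the owner
of an arbitrary slab object could be looked up like this:

    static inline struct mem_cgroup_ptr *
    obj_to_memcg_ptr_sketch(struct kmem_cache *s, void *p)
    {
    	struct page *page = virt_to_head_page(p);

    	/* Root caches never populate mem_cgroup_vec. */
    	if (is_root_cache(s))
    		return NULL;
    	return page->mem_cgroup_vec[obj_to_index(s, page, p)];
    }
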
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2700,8 +2700,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
struct kmem_cache_cpu *c;
struct page *page;
unsigned long tid;
+ struct mem_cgroup *memcg = NULL;
- s = slab_pre_alloc_hook(s, gfpflags);
+ s = slab_pre_alloc_hook(s, &memcg, 1, gfpflags);
if (!s)
return NULL;
redo:
@@ -2777,7 +2778,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(object, 0, s->object_size);
- slab_post_alloc_hook(s, gfpflags, 1, &object);
+ slab_post_alloc_hook(s, memcg, gfpflags, 1, &object);
return object;
}
@@ -2982,6 +2983,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
void *tail_obj = tail ? : head;
struct kmem_cache_cpu *c;
unsigned long tid;
+
+ memcg_slab_free_hook(s, page, head);
redo:
/*
* Determine the currently cpus per cpu slab.
@@ -3159,9 +3162,10 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
{
struct kmem_cache_cpu *c;
int i;
+ struct mem_cgroup *memcg = NULL;
/* memcg and kmem_cache debug support */
- s = slab_pre_alloc_hook(s, flags);
+ s = slab_pre_alloc_hook(s, &memcg, size, flags);
if (unlikely(!s))
return false;
/*
@@ -3206,11 +3210,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
}
/* memcg and kmem_cache debug support */
- slab_post_alloc_hook(s, flags, size, p);
+ slab_post_alloc_hook(s, memcg, flags, size, p);
return i;
error:
local_irq_enable();
- slab_post_alloc_hook(s, flags, i, p);
+ slab_post_alloc_hook(s, memcg, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;
}
Store a memcg_ptr in the corresponding place of the mem_cgroup_vec for
each allocated non-root slab object. Make sure that each allocated
object holds a reference to the mem_cgroup_ptr.

To get the memcg_ptr in the post-alloc hook, we need a memcg pointer.
Because all memory cgroups will soon share the same set of kmem_caches,
let's not use kmem_cache->memcg_params.memcg. Instead, let's pass the
pointer directly from memcg_kmem_get_cache(). This guarantees that the
same cgroup is used in the pre- and post-alloc hooks.

Please note that the code is a bit bulky now, because we have to manage
3 types of objects with reference counters: memcg, kmem_cache and
memcg_ptr. The following commits in the series will simplify it.

Signed-off-by: Roman Gushchin <guro@fb.com>
---
 include/linux/memcontrol.h |  3 +-
 mm/memcontrol.c            |  8 +++--
 mm/slab.c                  | 18 ++++++-----
 mm/slab.h                  | 64 ++++++++++++++++++++++++++++++++++----
 mm/slub.c                  | 14 ++++++---
 5 files changed, 86 insertions(+), 21 deletions(-)
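
To make the three reference counters easier to follow, here is the
lifetime of a single accounted object under this patch, annotated with
the reference each call takes or drops (names as in the diff; error
paths omitted):

    /*
     * alloc:
     *   memcg_kmem_get_cache(cachep, &memcg)
     *     percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)  // +kmem_cache
     *     css_get(&memcg->css)                                   // +memcg
     *   memcg_slab_post_alloc_hook(s, memcg, size, p)
     *     memcg_ptr = mem_cgroup_get_kmem_ptr(memcg)             // +memcg_ptr (temp)
     *     for each object p[i]:
     *       mem_cgroup_ptr_get(memcg_ptr)                        // +memcg_ptr (stored)
     *       page->mem_cgroup_vec[off] = memcg_ptr
     *     mem_cgroup_ptr_put(memcg_ptr)                          // -memcg_ptr (temp)
     *     mem_cgroup_put(memcg)                                  // -memcg
     *     memcg_kmem_put_cache(s)                                // -kmem_cache
     *
     * free:
     *   memcg_slab_free_hook(s, page, p)
     *     memcg_ptr = page->mem_cgroup_vec[off]
     *     page->mem_cgroup_vec[off] = NULL
     *     mem_cgroup_ptr_put(memcg_ptr)                          // -memcg_ptr (stored)
     */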