On our way to sharing slab pages between multiple memory cgroups, let's
make sure we don't use the kmem_cache.memcg_params.memcg pointer to
determine the memcg ownership of a slab object or page. Transform
memcg_from_slab_page() into memcg_from_slab_obj(), which relies on the
memcg ownership data stored in page->mem_cgroup_vec instead. Delete
mem_cgroup_from_kmem() and use memcg_from_slab_obj() in its place.

Note: memcg_from_slab_obj() returns NULL if the slab object belongs to
the root cgroup, which makes the explicit root_mem_cgroup check in
__mod_lruvec_slab_state() redundant, so remove it.

Signed-off-by: Roman Gushchin <guro@fb.com>
---
 mm/list_lru.c   | 12 +-----------
 mm/memcontrol.c |  9 ++++-----
 mm/slab.h       | 21 +++++++++++++--------
 3 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/mm/list_lru.c b/mm/list_lru.c
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -57,16 +57,6 @@ list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 	return &nlru->lru;
 }
 
-static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
-{
-	struct page *page;
-
-	if (!memcg_kmem_enabled())
-		return NULL;
-	page = virt_to_head_page(ptr);
-	return memcg_from_slab_page(page);
-}
-
 static inline struct list_lru_one *
 list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
 		   struct mem_cgroup **memcg_ptr)
@@ -77,7 +67,7 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
 	if (!nlru->memcg_lrus)
 		goto out;
 
-	memcg = mem_cgroup_from_kmem(ptr);
+	memcg = memcg_from_slab_obj(ptr);
 	if (!memcg)
 		goto out;
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -548,7 +548,7 @@ ino_t page_cgroup_ino(struct page *page)
 
 	rcu_read_lock();
 	if (PageHead(page) && PageSlab(page))
-		memcg = memcg_from_slab_page(page);
+		memcg = root_mem_cgroup;
 	else
 		memcg = READ_ONCE(page->mem_cgroup);
 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
@@ -852,16 +852,15 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 
 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
 {
-	struct page *page = virt_to_head_page(p);
-	pg_data_t *pgdat = page_pgdat(page);
+	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 
 	rcu_read_lock();
-	memcg = memcg_from_slab_page(page);
+	memcg = memcg_from_slab_obj(p);
 
 	/* Untracked pages have no memcg, no lruvec. Update only the node */
-	if (!memcg || memcg == root_mem_cgroup) {
+	if (!memcg) {
 		__mod_node_page_state(pgdat, idx, val);
 	} else {
 		lruvec = mem_cgroup_lruvec(pgdat, memcg);
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -329,15 +329,20 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  * The kmem_cache can be reparented asynchronously. The caller must ensure
  * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
  */
-static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
+static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
 {
-	struct kmem_cache *s;
-
-	s = READ_ONCE(page->slab_cache);
-	if (s && !is_root_cache(s))
-		return READ_ONCE(s->memcg_params.memcg);
+	struct mem_cgroup_ptr *memcg_ptr;
+	struct page *page;
+	unsigned int off;
 
-	return NULL;
+	if (!memcg_kmem_enabled())
+		return NULL;
+	page = virt_to_head_page(ptr);
+	if (is_root_cache(page->slab_cache))
+		return NULL;
+	off = obj_to_index(page->slab_cache, page, ptr);
+	memcg_ptr = page->mem_cgroup_vec[off];
+	return memcg_ptr->memcg;
 }
 
 static inline int memcg_alloc_page_memcg_vec(struct page *page, gfp_t gfp,
@@ -464,7 +469,7 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s;
 }
 
-static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
+static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
 {
 	return NULL;
 }
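As an illustration of the per-object ownership scheme the patch switches
to: each slab page now carries a vector with one mem_cgroup_ptr entry per
object, and ownership is resolved by computing the object's index within
the page instead of reading a single per-cache memcg pointer. Below is a
minimal, self-contained userspace sketch of that lookup; all types and
helpers are simplified stand-ins for the kernel's struct page,
page->mem_cgroup_vec and obj_to_index(), not the real API.

/*
 * Minimal userspace model of the lookup done by memcg_from_slab_obj().
 * Every name below is a simplified stand-in for the kernel structure of
 * the same name, not the real kernel API.
 */
#include <stdio.h>

struct mem_cgroup { const char *name; };
struct mem_cgroup_ptr { struct mem_cgroup *memcg; };

/* Stand-in for a slab page: a buffer carved into equal-sized objects,
 * plus one ownership pointer per object (the mem_cgroup_vec). */
struct slab_page {
	char *base;				/* first object on the page */
	unsigned int obj_size;			/* object size of the cache */
	struct mem_cgroup_ptr **mem_cgroup_vec;	/* one entry per object */
};

/* Model of obj_to_index(): the object's slot is its byte offset from
 * the start of the page divided by the object size. */
static unsigned int obj_to_index(const struct slab_page *page, void *ptr)
{
	return (unsigned int)(((char *)ptr - page->base) / page->obj_size);
}

/* Model of memcg_from_slab_obj(): resolve ownership through the
 * per-object vector; NULL models a root-cgroup (untracked) object. */
static struct mem_cgroup *memcg_from_slab_obj(const struct slab_page *page,
					      void *ptr)
{
	unsigned int off = obj_to_index(page, ptr);
	struct mem_cgroup_ptr *memcg_ptr = page->mem_cgroup_vec[off];

	return memcg_ptr ? memcg_ptr->memcg : NULL;
}

int main(void)
{
	struct mem_cgroup cg_a = { "A" }, cg_b = { "B" };
	struct mem_cgroup_ptr pa = { &cg_a }, pb = { &cg_b };
	static char buf[4 * 64];
	struct mem_cgroup_ptr *vec[4] = { &pa, &pb, &pa, NULL };
	struct slab_page page = { buf, 64, vec };

	/* Objects on the same page can belong to different cgroups. */
	printf("obj0 -> %s\n", memcg_from_slab_obj(&page, buf)->name);
	printf("obj1 -> %s\n", memcg_from_slab_obj(&page, buf + 64)->name);
	return 0;
}

In the kernel version the root-cgroup case is filtered out by the
is_root_cache() check before the vector is consulted, so
memcg_from_slab_obj() returns NULL for such objects; the sketch models
the same contract with a NULL vector entry. That NULL-for-root contract
is what makes the explicit memcg == root_mem_cgroup test in
__mod_lruvec_slab_state() redundant.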