@@ -340,6 +340,41 @@ struct mem_cgroup {
extern struct mem_cgroup *root_mem_cgroup;
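+/*
+ * page_mem_cgroup - get the memory cgroup associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the memory cgroup associated with the page,
+ * or NULL. This function assumes that the page is known to have a
+ * memory cgroup pointer; it is not safe to call against slab pages.
+ */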
+static inline struct mem_cgroup *page_mem_cgroup(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ return (struct mem_cgroup *)page->memcg_data;
+}
+
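+/*
+ * page_mem_cgroup_check - get the memory cgroup associated with a page
+ * @page: a pointer to the page struct
+ *
+ * Returns a pointer to the memory cgroup associated with the page,
+ * or NULL. Unlike page_mem_cgroup(), this function is safe to call
+ * against pages which might carry an obj_cgroups vector instead of
+ * a memcg pointer (e.g. slab pages); NULL is returned in that case.
+ */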
+static inline struct mem_cgroup *page_mem_cgroup_check(struct page *page)
+{
+ unsigned long memcg_data = page->memcg_data;
+
+ /*
+ * The lowest bit set means that memcg_data isn't a valid
+ * memcg pointer, but an obj_cgroups pointer.
+ * In this case the page is shared and doesn't belong
+ * to any specific memory cgroup.
+ */
+ if (memcg_data & 0x1UL)
+ return NULL;
+
+ return (struct mem_cgroup *)memcg_data;
+}
+
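+/*
+ * set_page_mem_cgroup - associate a page with a memory cgroup
+ * @page: a pointer to the page struct
+ * @memcg: a pointer to the memory cgroup
+ *
+ * Stores the memcg pointer in page->memcg_data. Must not be called
+ * against slab pages, which use the field for an obj_cgroups vector.
+ */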
+static inline void set_page_mem_cgroup(struct page *page,
+ struct mem_cgroup *memcg)
+{
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ page->memcg_data = (unsigned long)memcg;
+}
+
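+/*
+ * clear_page_mem_cgroup - clear the binding between a page and a memcg
+ * @page: a pointer to the page struct
+ *
+ * Must not be called against slab pages.
+ */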
+static inline void clear_page_mem_cgroup(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ page->memcg_data = 0;
+}
+
static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
if (idx == MEMCG_PERCPU_B)
@@ -740,15 +775,15 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
static inline void __mod_memcg_page_state(struct page *page,
int idx, int val)
{
- if (page->mem_cgroup)
- __mod_memcg_state(page->mem_cgroup, idx, val);
+ if (page_mem_cgroup(page))
+ __mod_memcg_state(page_mem_cgroup(page), idx, val);
}
static inline void mod_memcg_page_state(struct page *page,
int idx, int val)
{
- if (page->mem_cgroup)
- mod_memcg_state(page->mem_cgroup, idx, val);
+ if (page_mem_cgroup(page))
+ mod_memcg_state(page_mem_cgroup(page), idx, val);
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
@@ -835,12 +870,12 @@ static inline void __mod_lruvec_page_state(struct page *page,
struct lruvec *lruvec;
/* Untracked pages have no memcg, no lruvec. Update only the node */
- if (!head->mem_cgroup) {
+ if (!page_mem_cgroup(head)) {
__mod_node_page_state(pgdat, idx, val);
return;
}
- lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
+ lruvec = mem_cgroup_lruvec(page_mem_cgroup(head), pgdat);
__mod_lruvec_state(lruvec, idx, val);
}
@@ -875,8 +910,8 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
static inline void count_memcg_page_event(struct page *page,
enum vm_event_item idx)
{
- if (page->mem_cgroup)
- count_memcg_events(page->mem_cgroup, idx, 1);
+ if (page_mem_cgroup(page))
+ count_memcg_events(page_mem_cgroup(page), idx, 1);
}
static inline void count_memcg_event_mm(struct mm_struct *mm,
@@ -938,6 +973,25 @@ void mem_cgroup_split_huge_fixup(struct page *head);
struct mem_cgroup;
+static inline struct mem_cgroup *page_mem_cgroup(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_mem_cgroup_check(struct page *page)
+{
+ return NULL;
+}
+
+static inline void set_page_mem_cgroup(struct page *page,
+ struct mem_cgroup *memcg)
+{
+}
+
+static inline void clear_page_mem_cgroup(struct page *page)
+{
+}
+
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return true;
@@ -1427,7 +1481,7 @@ static inline void mem_cgroup_track_foreign_dirty(struct page *page,
if (mem_cgroup_disabled())
return;
- if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
+ if (unlikely(&page_mem_cgroup(page)->css != wb->memcg_css))
mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}
@@ -1474,28 +1474,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *page_memcg(struct page *page)
-{
- return page->mem_cgroup;
-}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(page->mem_cgroup);
-}
-#else
-static inline struct mem_cgroup *page_memcg(struct page *page)
-{
- return NULL;
-}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return NULL;
-}
-#endif
-
/*
* Some inline functions in vmstat.h depend on page_zone()
*/
@@ -199,10 +199,7 @@ struct page {
atomic_t _refcount;
#ifdef CONFIG_MEMCG
- union {
- struct mem_cgroup *mem_cgroup;
- struct obj_cgroup **obj_cgroups;
- };
+ unsigned long memcg_data;
#endif
/*
@@ -257,7 +257,7 @@ TRACE_EVENT(track_foreign_dirty,
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- __entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
+ __entry->page_cgroup_ino = cgroup_ino(page_mem_cgroup(page)->css.cgroup);
),
TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
@@ -182,8 +182,8 @@ void __dump_page(struct page *page, const char *reason)
pr_warn("page dumped because: %s\n", reason);
#ifdef CONFIG_MEMCG
- if (!page_poisoned && page->mem_cgroup)
- pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
+ if (!page_poisoned && page_mem_cgroup(page))
+ pr_warn("page->mem_cgroup:%px\n", page_mem_cgroup(page));
#endif
}
@@ -470,7 +470,7 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
- struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
+ struct mem_cgroup *memcg = page_mem_cgroup(compound_head(page));
struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
if (memcg)
@@ -2728,7 +2728,7 @@ void deferred_split_huge_page(struct page *page)
{
struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
- struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
+ struct mem_cgroup *memcg = page_mem_cgroup(compound_head(page));
#endif
unsigned long flags;
@@ -541,7 +541,7 @@ struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
struct mem_cgroup *memcg;
- memcg = page->mem_cgroup;
+ memcg = page_mem_cgroup(page);
if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
memcg = root_mem_cgroup;
@@ -568,16 +568,7 @@ ino_t page_cgroup_ino(struct page *page)
unsigned long ino = 0;
rcu_read_lock();
- memcg = page->mem_cgroup;
-
- /*
- * The lowest bit set means that memcg isn't a valid
- * memcg pointer, but a obj_cgroups pointer.
- * In this case the page is shared and doesn't belong
- * to any specific memory cgroup.
- */
- if ((unsigned long) memcg & 0x1UL)
- memcg = NULL;
+ memcg = page_mem_cgroup_check(page);
while (memcg && !(memcg->css.flags & CSS_ONLINE))
memcg = parent_mem_cgroup(memcg);
@@ -1058,7 +1049,7 @@ EXPORT_SYMBOL(get_mem_cgroup_from_mm);
*/
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
- struct mem_cgroup *memcg = page->mem_cgroup;
+ struct mem_cgroup *memcg = page_mem_cgroup(page);
if (mem_cgroup_disabled())
return NULL;
@@ -1343,7 +1334,7 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
* @page: the page
* @pgdat: pgdat of the page
*
- * This function relies on page->mem_cgroup being stable - see the
+ * This function relies on the page's memcg binding being stable - see the
* access rules in commit_charge().
*/
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
@@ -1357,7 +1348,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd
goto out;
}
- memcg = page->mem_cgroup;
+ memcg = page_mem_cgroup(page);
/*
* Swapcache readahead pages are added to the LRU - and
* possibly migrated - before they are charged.
@@ -2100,7 +2091,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
}
/**
- * lock_page_memcg - lock a page->mem_cgroup binding
+ * lock_page_memcg - lock the binding between a page and its memcg
* @page: the page
*
* This function protects unlocked LRU pages from being moved to
@@ -2132,7 +2123,7 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
if (mem_cgroup_disabled())
return NULL;
again:
- memcg = head->mem_cgroup;
+ memcg = page_mem_cgroup(head);
if (unlikely(!memcg))
return NULL;
@@ -2140,7 +2131,7 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
return memcg;
spin_lock_irqsave(&memcg->move_lock, flags);
- if (memcg != head->mem_cgroup) {
+ if (memcg != page_mem_cgroup(head)) {
spin_unlock_irqrestore(&memcg->move_lock, flags);
goto again;
}
@@ -2178,14 +2169,14 @@ void __unlock_page_memcg(struct mem_cgroup *memcg)
}
/**
- * unlock_page_memcg - unlock a page->mem_cgroup binding
+ * unlock_page_memcg - unlock the binding between a page and its memcg
* @page: the page
*/
void unlock_page_memcg(struct page *page)
{
struct page *head = compound_head(page);
- __unlock_page_memcg(head->mem_cgroup);
+ __unlock_page_memcg(page_mem_cgroup(head));
}
EXPORT_SYMBOL(unlock_page_memcg);
@@ -2875,16 +2866,16 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
static void commit_charge(struct page *page, struct mem_cgroup *memcg)
{
- VM_BUG_ON_PAGE(page->mem_cgroup, page);
+ VM_BUG_ON_PAGE(page_mem_cgroup(page), page);
/*
- * Any of the following ensures page->mem_cgroup stability:
+ * Any of the following ensures the stability of the page's memcg binding:
*
* - the page lock
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
*/
- page->mem_cgroup = memcg;
+ set_page_mem_cgroup(page, memcg);
}
#ifdef CONFIG_MEMCG_KMEM
@@ -2899,8 +2890,7 @@ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
if (!vec)
return -ENOMEM;
- if (cmpxchg(&page->obj_cgroups, NULL,
- (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
+ if (cmpxchg(&page->memcg_data, 0, (unsigned long)vec | 0x1UL))
kfree(vec);
else
kmemleak_not_leak(vec);
@@ -2923,17 +2913,6 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
page = virt_to_head_page(p);
- /*
- * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
- * or a pointer to obj_cgroup vector. In the latter case the lowest
- * bit of the pointer is set.
- * The page->mem_cgroup pointer can be asynchronously changed
- * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed
- * from a valid memcg pointer to objcg vector or back.
- */
- if (!page->mem_cgroup)
- return NULL;
-
/*
* Slab objects are accounted individually, not per-page.
* Memcg membership data for each individual object is saved in
@@ -2952,7 +2931,7 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
}
/* All other pages use page->mem_cgroup */
- return page->mem_cgroup;
+ return page_mem_cgroup_check(page);
}
__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
@@ -3090,7 +3069,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
if (memcg && !mem_cgroup_is_root(memcg)) {
ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
if (!ret) {
- page->mem_cgroup = memcg;
+ set_page_mem_cgroup(page, memcg);
__SetPageKmemcg(page);
return 0;
}
@@ -3106,7 +3085,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
*/
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
- struct mem_cgroup *memcg = page->mem_cgroup;
+ struct mem_cgroup *memcg = page_mem_cgroup(page);
unsigned int nr_pages = 1 << order;
if (!memcg)
@@ -3114,7 +3093,7 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
__memcg_kmem_uncharge(memcg, nr_pages);
- page->mem_cgroup = NULL;
+ clear_page_mem_cgroup(page);
css_put(&memcg->css);
/* slab pages do not have PageKmemcg flag set */
@@ -3265,7 +3244,7 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
*/
void mem_cgroup_split_huge_fixup(struct page *head)
{
- struct mem_cgroup *memcg = head->mem_cgroup;
+ struct mem_cgroup *memcg = page_mem_cgroup(head);
int i;
if (mem_cgroup_disabled())
@@ -3273,7 +3252,7 @@ void mem_cgroup_split_huge_fixup(struct page *head)
for (i = 1; i < HPAGE_PMD_NR; i++) {
css_get(&memcg->css);
- head[i].mem_cgroup = memcg;
+ set_page_mem_cgroup(&head[i], memcg);
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -4649,7 +4628,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
struct bdi_writeback *wb)
{
- struct mem_cgroup *memcg = page->mem_cgroup;
+ struct mem_cgroup *memcg = page_mem_cgroup(page);
struct memcg_cgwb_frn *frn;
u64 now = get_jiffies_64();
u64 oldest_at = now;
@@ -5641,14 +5620,14 @@ static int mem_cgroup_move_account(struct page *page,
/*
* Prevent mem_cgroup_migrate() from looking at
- * page->mem_cgroup of its source page while we change it.
+ * the memcg of its source page while we change it.
*/
ret = -EBUSY;
if (!trylock_page(page))
goto out;
ret = -EINVAL;
- if (page->mem_cgroup != from)
+ if (page_mem_cgroup(page) != from)
goto out_unlock;
pgdat = page_pgdat(page);
@@ -5703,13 +5682,13 @@ static int mem_cgroup_move_account(struct page *page,
/*
* All state has been migrated, let's switch to the new memcg.
*
- * It is safe to change page->mem_cgroup here because the page
+ * It is safe to change the page's memcg here because the page
* is referenced, charged, isolated, and locked: we can't race
* with (un)charging, migration, LRU putback, or anything else
- * that would rely on a stable page->mem_cgroup.
+ * that would rely on the page's memcg binding being stable.
*
* Note that lock_page_memcg is a memcg lock, not a page lock,
- * to save space. As soon as we switch page->mem_cgroup to a
+ * to save space. As soon as we switch the page's memcg to a
* new memcg that isn't locked, the above state can change
* concurrently again. Make sure we're truly done with it.
*/
@@ -5718,7 +5697,7 @@ static int mem_cgroup_move_account(struct page *page,
css_get(&to->css);
css_put(&from->css);
- page->mem_cgroup = to;
+ set_page_mem_cgroup(page, to);
__unlock_page_memcg(from);
@@ -5784,7 +5763,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
* mem_cgroup_move_account() checks the page is valid or
* not under LRU exclusion.
*/
- if (page->mem_cgroup == mc.from) {
+ if (page_mem_cgroup(page) == mc.from) {
ret = MC_TARGET_PAGE;
if (is_device_private_page(page))
ret = MC_TARGET_DEVICE;
@@ -5828,7 +5807,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
VM_BUG_ON_PAGE(!page || !PageHead(page), page);
if (!(mc.flags & MOVE_ANON))
return ret;
- if (page->mem_cgroup == mc.from) {
+ if (page_mem_cgroup(page) == mc.from) {
ret = MC_TARGET_PAGE;
if (target) {
get_page(page);
@@ -6739,12 +6718,12 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
/*
* Every swap fault against a single page tries to charge the
* page, bail as early as possible. shmem_unuse() encounters
- * already charged pages, too. page->mem_cgroup is protected
- * by the page lock, which serializes swap cache removal, which
- * in turn serializes uncharging.
+ * already charged pages, too. The page's memcg binding is
+ * protected by the page lock, which serializes swap cache
+ * removal, which in turn serializes uncharging.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
- if (compound_head(page)->mem_cgroup)
+ if (page_mem_cgroup(compound_head(page)))
goto out;
id = lookup_swap_cgroup_id(ent);
@@ -6828,21 +6807,21 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
VM_BUG_ON_PAGE(PageLRU(page), page);
- if (!page->mem_cgroup)
+ if (!page_mem_cgroup(page))
return;
/*
* Nobody should be changing or seriously looking at
- * page->mem_cgroup at this point, we have fully
+ * the page's memcg at this point, we have fully
* exclusive access to the page.
*/
- if (ug->memcg != page->mem_cgroup) {
+ if (ug->memcg != page_mem_cgroup(page)) {
if (ug->memcg) {
uncharge_batch(ug);
uncharge_gather_clear(ug);
}
- ug->memcg = page->mem_cgroup;
+ ug->memcg = page_mem_cgroup(page);
/* pairs with css_put in uncharge_batch */
css_get(&ug->memcg->css);
@@ -6859,7 +6838,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
}
ug->dummy_page = page;
- page->mem_cgroup = NULL;
+ clear_page_mem_cgroup(page);
css_put(&ug->memcg->css);
}
@@ -6902,7 +6881,7 @@ void mem_cgroup_uncharge(struct page *page)
return;
/* Don't touch page->lru of any random page, pre-check: */
- if (!page->mem_cgroup)
+ if (!page_mem_cgroup(page))
return;
uncharge_gather_clear(&ug);
@@ -6952,11 +6931,11 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
return;
/* Page cache replacement: new page already charged? */
- if (newpage->mem_cgroup)
+ if (page_mem_cgroup(newpage))
return;
/* Swapcache readahead pages can get replaced before being charged */
- memcg = oldpage->mem_cgroup;
+ memcg = page_mem_cgroup(oldpage);
if (!memcg)
return;
@@ -7151,7 +7130,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
return;
- memcg = page->mem_cgroup;
+ memcg = page_mem_cgroup(page);
/* Readahead page, never charged */
if (!memcg)
@@ -7172,7 +7151,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
VM_BUG_ON_PAGE(oldid, page);
mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
- page->mem_cgroup = NULL;
+ clear_page_mem_cgroup(page);
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, nr_entries);
@@ -7215,7 +7194,7 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
return 0;
- memcg = page->mem_cgroup;
+ memcg = page_mem_cgroup(page);
/* Readahead page, never charged */
if (!memcg)
@@ -7296,7 +7275,7 @@ bool mem_cgroup_swap_full(struct page *page)
if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
return false;
- memcg = page->mem_cgroup;
+ memcg = page_mem_cgroup(page);
if (!memcg)
return false;
@@ -493,7 +493,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
struct lruvec *old_lruvec, *new_lruvec;
struct mem_cgroup *memcg;
- memcg = page_memcg(page);
+ memcg = page_mem_cgroup(page);
old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
@@ -1056,7 +1056,7 @@ static inline bool page_expected_state(struct page *page,
if (unlikely((unsigned long)page->mapping |
page_ref_count(page) |
#ifdef CONFIG_MEMCG
- (unsigned long)page->mem_cgroup |
+ (unsigned long)page_mem_cgroup(page) |
#endif
(page->flags & check_flags)))
return false;
@@ -1081,7 +1081,7 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
}
#ifdef CONFIG_MEMCG
- if (unlikely(page->mem_cgroup))
+ if (unlikely(page_mem_cgroup(page)))
bad_reason = "page still charged to cgroup";
#endif
return bad_reason;
@@ -282,11 +282,11 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
struct cgroup_subsys_state *css;
- if (!page->mem_cgroup)
+ if (!page_mem_cgroup(page))
return;
rcu_read_lock();
- css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+ css = cgroup_e_css(page_mem_cgroup(page)->css.cgroup, &io_cgrp_subsys);
bio_associate_blkg_from_css(bio, css);
rcu_read_unlock();
}
@@ -247,13 +247,12 @@ static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
* that the page is a slab page (e.g. page_cgroup_ino()), let's
* always set the lowest bit of obj_cgroups.
*/
- return (struct obj_cgroup **)
- ((unsigned long)page->obj_cgroups & ~0x1UL);
+ return (struct obj_cgroup **)(page->memcg_data & ~0x1UL);
}
static inline bool page_has_obj_cgroups(struct page *page)
{
- return ((unsigned long)page->obj_cgroups & 0x1UL);
+ return page->memcg_data & 0x1UL;
}
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
@@ -262,7 +261,7 @@ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
static inline void memcg_free_page_obj_cgroups(struct page *page)
{
kfree(page_obj_cgroups(page));
- page->obj_cgroups = NULL;
+ page->memcg_data = 0;
}
static inline size_t obj_full_size(struct kmem_cache *s)
@@ -345,7 +345,7 @@ void workingset_refault(struct page *page, void *shadow)
* However, the cgroup that will own the page is the one that
* is actually experiencing the refault event.
*/
- memcg = page_memcg(page);
+ memcg = page_mem_cgroup(page);
lruvec = mem_cgroup_lruvec(memcg, pgdat);
inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
@@ -407,7 +407,7 @@ void workingset_activation(struct page *page)
* XXX: See workingset_refault() - this should return
* root_mem_cgroup even for !CONFIG_MEMCG.
*/
- memcg = page_memcg_rcu(page);
+ memcg = page_mem_cgroup(page);
if (!mem_cgroup_disabled() && !memcg)
goto out;
lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
Currently there are many open-coded reads and writes of the
page->mem_cgroup pointer, as well as a couple of read helpers which are
barely used.

This creates an obstacle to reusing some bits of the pointer for storing
additional information. In fact, we already do this for slab pages: the
lowest bit set indicates that the pointer carries an attached vector of
objcg pointers instead of a regular memcg pointer.

This commit introduces 4 new helper functions and converts all raw
accesses to page->mem_cgroup to calls of these helpers:

  struct mem_cgroup *page_mem_cgroup(struct page *page);
  struct mem_cgroup *page_mem_cgroup_check(struct page *page);
  void set_page_mem_cgroup(struct page *page, struct mem_cgroup *memcg);
  void clear_page_mem_cgroup(struct page *page);

page_mem_cgroup_check() is intended to be used in cases when the page can
be a slab page and have a memcg pointer pointing at an objcg vector. It
checks the lowest bit and, if it is set, returns NULL. page_mem_cgroup()
contains a VM_BUG_ON_PAGE() check for the page not being a slab page, and
so do set_page_mem_cgroup() and clear_page_mem_cgroup().

To make sure nobody uses a direct access, struct page's
mem_cgroup/obj_cgroups union is converted to an unsigned long memcg_data
field. Only the new helpers and a couple of slab-accounting related
functions access this field directly.

The page_memcg() and page_memcg_rcu() helpers defined in mm.h are
removed. The new page_mem_cgroup() is a direct analog of page_memcg(),
while page_memcg_rcu() has a single call site in a small rcu-read-lock
section, so it is not worth keeping a separate helper for it. It is
replaced with page_mem_cgroup() as well.

Signed-off-by: Roman Gushchin <guro@fb.com>
---
 include/linux/memcontrol.h       |  72 +++++++++++++++++---
 include/linux/mm.h               |  22 ------
 include/linux/mm_types.h         |   5 +-
 include/trace/events/writeback.h |   2 +-
 mm/debug.c                       |   4 +-
 mm/huge_memory.c                 |   4 +-
 mm/memcontrol.c                  | 111 +++++++++++++------------------
 mm/migrate.c                     |   2 +-
 mm/page_alloc.c                  |   4 +-
 mm/page_io.c                     |   4 +-
 mm/slab.h                        |   7 +-
 mm/workingset.c                  |   4 +-
 12 files changed, 124 insertions(+), 117 deletions(-)
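
To make the tagging convention concrete, here is a minimal standalone C
sketch (illustration only, not kernel code: the struct layouts are
simplified stand-ins, and MEMCG_DATA_OBJCGS is a name chosen here for
the bare 0x1UL tag the patch uses):

  #include <assert.h>
  #include <stdio.h>

  /* Simplified stand-ins for the kernel types. */
  struct mem_cgroup { int id; };
  struct obj_cgroup { int id; };

  /*
   * A page's memcg_data word holds either a mem_cgroup pointer
   * (bit 0 clear) or an obj_cgroups vector pointer with bit 0 set.
   */
  struct page { unsigned long memcg_data; };

  #define MEMCG_DATA_OBJCGS 0x1UL  /* illustrative name for the tag */

  static struct mem_cgroup *page_mem_cgroup_check(struct page *page)
  {
      unsigned long memcg_data = page->memcg_data;

      /* Lowest bit set: an obj_cgroups vector, not a memcg pointer. */
      if (memcg_data & MEMCG_DATA_OBJCGS)
          return NULL;
      return (struct mem_cgroup *)memcg_data;
  }

  int main(void)
  {
      static struct mem_cgroup memcg = { 42 };
      static struct obj_cgroup *vec[4];
      struct page p1 = { (unsigned long)&memcg };
      struct page p2 = { (unsigned long)vec | MEMCG_DATA_OBJCGS };

      /* A plain memcg pointer is returned as-is... */
      assert(page_mem_cgroup_check(&p1) == &memcg);
      /* ...while a tagged objcg vector yields NULL. */
      assert(page_mem_cgroup_check(&p2) == NULL);

      /* Untagging recovers the vector (cf. page_obj_cgroups()). */
      struct obj_cgroup **objcgs =
          (struct obj_cgroup **)(p2.memcg_data & ~MEMCG_DATA_OBJCGS);
      assert(objcgs == vec);

      printf("page 1 is charged to memcg %d\n",
             page_mem_cgroup_check(&p1)->id);
      return 0;
  }

The scheme is sound only because both struct mem_cgroup and the
kmalloc()-ed obj_cgroups vectors are at least word-aligned, so bit 0 of
any valid pointer is guaranteed to be clear - the same reason the slab.h
comment above insists on always setting the lowest bit of obj_cgroups.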