@@ -1151,7 +1151,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
         rcu_read_unlock();
 }
 
-void split_page_memcg(struct page *head, unsigned int nr);
+void split_page_memcg(struct page *head, int order);
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                                 gfp_t gfp_mask,
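
The declaration in include/linux/memcontrol.h (and its !CONFIG_MEMCG stub in the next hunk) now takes the folio's order rather than a page count. Compound pages always span a power-of-two number of base pages, so the count carried no information the order does not, while a raw count lets a buggy caller pass a value that is not a power of two at all. A tiny userspace sketch of the equivalence (an illustration, not kernel code; the order-9 example assumes a 4KB base page):

#include <assert.h>

int main(void)
{
        int order = 9;                  /* e.g. a 2MB THP on x86-64 */
        unsigned int nr = 1u << order;  /* what callers used to pass: 512 */

        assert((nr & (nr - 1)) == 0);   /* valid counts are powers of two */
        assert(nr == 512);
        return 0;
}
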
@@ -1588,7 +1588,7 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
 
-static inline void split_page_memcg(struct page *head, unsigned int nr)
+static inline void split_page_memcg(struct page *head, int order)
 {
 }
@@ -2512,10 +2512,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
         struct address_space *swap_cache = NULL;
         unsigned long offset = 0;
         unsigned int nr = thp_nr_pages(head);
+        int order = folio_order(folio);
         int i;
 
         /* complete memcg works before add pages to LRU */
-        split_page_memcg(head, nr);
+        split_page_memcg(head, order);
 
         if (PageAnon(head) && PageSwapCache(head)) {
                 swp_entry_t entry = { .val = page_private(head) };
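
In __split_huge_page() the new local is read with folio_order() at the top of the function, while the folio is still intact; once the tail pages have been carved out, the head no longer reports the original size. The memcg bookkeeping then runs on that saved order before the pages go onto the LRU, as the comment notes. A minimal userspace model of why the value is captured up front (hypothetical mock_* names, not kernel code):

#include <assert.h>

struct mock_folio {
        int order;
};

static void mock_split(struct mock_folio *f)
{
        f->order = 0;   /* after a full split, the head is an order-0 page */
}

int main(void)
{
        struct mock_folio f = { .order = 4 };
        int order = f.order;    /* capture before the split, as the patch does */

        mock_split(&f);
        assert(order == 4 && f.order == 0);
        return 0;
}
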
@@ -3414,11 +3414,12 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
 /*
  * Because page_memcg(head) is not set on tails, set it now.
  */
-void split_page_memcg(struct page *head, unsigned int nr)
+void split_page_memcg(struct page *head, int order)
 {
         struct folio *folio = page_folio(head);
         struct mem_cgroup *memcg = folio_memcg(folio);
         int i;
+        unsigned int nr = 1 << order;
 
         if (mem_cgroup_disabled() || !memcg)
                 return;
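
The rest of the function, below the hunk, does what the comment above it says: it copies the head page's memcg binding to every tail page. The only change here is that the page count is now derived once, via nr = 1 << order. A self-contained userspace model of that loop (hypothetical mock_* names; the real code propagates page->memcg_data and takes matching reference counts):

#include <assert.h>

struct mock_page {
        unsigned long memcg_data;       /* models page->memcg_data */
};

static void mock_split_page_memcg(struct mock_page *head, int order)
{
        unsigned int nr = 1u << order;  /* the count the old API took directly */
        unsigned int i;

        if (!head->memcg_data)          /* models the disabled/!memcg bailout */
                return;

        for (i = 1; i < nr; i++)        /* tails inherit the head's binding */
                head[i].memcg_data = head->memcg_data;
}

int main(void)
{
        struct mock_page block[8] = { { .memcg_data = 0xabc } };

        mock_split_page_memcg(block, 3);        /* an order-3 block is 8 pages */
        assert(block[7].memcg_data == 0xabc);
        return 0;
}
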
@@ -2781,7 +2781,7 @@ void split_page(struct page *page, unsigned int order)
         for (i = 1; i < (1 << order); i++)
                 set_page_refcounted(page + i);
         split_page_owner(page, 1 << order);
-        split_page_memcg(page, 1 << order);
+        split_page_memcg(page, order);
 }
 EXPORT_SYMBOL_GPL(split_page);
@@ -4997,7 +4997,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
                 struct page *last = page + nr;
 
                 split_page_owner(page, 1 << order);
-                split_page_memcg(page, 1 << order);
+                split_page_memcg(page, order);
                 while (page < --last)
                         set_page_refcounted(last);
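
Both page_alloc.c call sites already hold an order, so the 1 << order expansion disappears from the callers and now happens exactly once, inside split_page_memcg() itself. Note the asymmetry still visible in make_alloc_exact(): the refcount loop only walks down from last = page + nr, the exact number of pages being handed out, while split_page_owner() and split_page_memcg() cover the whole order-sized block that was allocated.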