@@ -52,7 +52,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
- MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
+ MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
MEMCG_SLAB_RECLAIMABLE,
MEMCG_SLAB_UNRECLAIMABLE,
MEMCG_SOCK,
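
The rename is a unit change, not just a spelling change: the stat indexed by
MEMCG_KERNEL_STACK_KB now counts kilobytes of stack rather than whole pages,
matching the per-zone NR_KERNEL_STACK_KB counter that account_kernel_stack()
updates below.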
@@ -165,20 +165,12 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
						  THREAD_SIZE_ORDER);

-	if (page)
- memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
- 1 << THREAD_SIZE_ORDER);
-
return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
- struct page *page = virt_to_page(ti);
-
- memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
- -(1 << THREAD_SIZE_ORDER));
- __free_kmem_pages(page, THREAD_SIZE_ORDER);
+ free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_info_cache;
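
alloc_thread_info_node() becomes a plain allocation again, and
free_thread_info() collapses to a single free_kmem_pages() call on the
stack's address; the open-coded memcg updates move into
account_kernel_stack() in the next hunk. That helper appears to sit outside
the THREAD_SIZE >= PAGE_SIZE conditional, so, assuming it is called on both
allocation paths, the kmem_cache-backed stacks of the # else branch get
charged as well; the old page-granular counter could not have expressed a
sub-page stack at all.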
@@ -227,6 +219,11 @@ static void account_kernel_stack(struct thread_info *ti, int account)

	mod_zone_page_state(zone, NR_KERNEL_STACK_KB,
THREAD_SIZE / 1024 * account);
+
+ /* All stack pages belong to the same memcg. */
+ memcg_kmem_update_page_stat(
+ virt_to_page(ti), MEMCG_KERNEL_STACK_KB,
+ account * (THREAD_SIZE / 1024));
}

void free_task(struct task_struct *tsk)
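
The zone stat and the memcg stat now use the same kilobyte delta,
THREAD_SIZE / 1024, with the sign carried by `account`. A standalone sketch
of the arithmetic, assuming x86_64 defaults of PAGE_SIZE = 4096 and
THREAD_SIZE_ORDER = 2 (both are architecture-specific); this is illustration
only, not part of the patch:

/* Sketch only: assumed x86_64 values, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE         4096UL
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)  /* 16384 */

int main(void)
{
        int account = 1;  /* +1 when a stack is allocated, -1 when freed */

        /* Old memcg delta, in pages: 1 << THREAD_SIZE_ORDER = 4 */
        printf("old delta: %d pages\n", account * (1 << THREAD_SIZE_ORDER));

        /* New memcg delta, in kB: THREAD_SIZE / 1024 = 16 */
        printf("new delta: %ld kB\n", account * (long)(THREAD_SIZE / 1024));

        /* Both deltas describe the same number of bytes. */
        printf("%lu == %lu bytes\n",
               (1UL << THREAD_SIZE_ORDER) * PAGE_SIZE,
               (THREAD_SIZE / 1024) * 1024);

        return 0;
}

For a 16 kB stack the old code charged 4 pages and the new code charges
16 kB, i.e. the same 16384 bytes; the kB form additionally works when a
stack is smaller than a page.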
@@ -5133,7 +5133,7 @@ static int memory_stat_show(struct seq_file *m, void *v)
seq_printf(m, "file %llu\n",
(u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
seq_printf(m, "kernel_stack %llu\n",
- (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
+ (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
seq_printf(m, "slab %llu\n",
(u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
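
The userspace-visible memory.stat output stays in bytes: kernel_stack
switches from pages * PAGE_SIZE to kB * 1024, so only the counter's internal
unit changes, and the reported number is identical for page-multiple stacks.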