Message ID | 20240415111123924s9IbQkgHF8S4yZv4su8LI@zte.com.cn (mailing list archive)
---|---
State | New
Series | mm: thp: makes the memcg THP deferred split shrinker aware of node_id
On Mon, Apr 15, 2024 at 3:11 PM <xu.xin16@zte.com.cn> wrote:
>
> From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
>
> Since commit 87eaceb3faa5 ("mm: thp: make deferred split shrinker
> memcg aware"), the THP deferred split queue is per memcg but not
> per mem_cgroup_per_node. This has two impacts:
>
> Impact 1: kswapd reclaim
> ========================
> kswapd
>   balance_pgdat
>     kswapd_shrink_node
>       shrink_node(pgdat, sc);
>         shrink_node_memcgs(pgdat, sc);
>           shrink_slab(sc->gfp_mask, pgdat->node_id, memcg...);
> The "pgdat->node_id" parameter does not take effect for the THP
> deferred_split_shrinker, because the deferred_split_queue of the
> specified memcg is not per NUMA node but shared by all nodes.
> We want to make the memcg THP deferred split shrinker aware of
> node_id.
>
> Impact 2: thp-deferred_split shrinker debugfs interface
> =======================================================
> For the "count" file:
>   <cgroup inode id> <objects on node 0> <objects on node 1>
> the output is actually the sum over all NUMA nodes.
> For the "scan" file:
>   <cgroup inode id> <numa id> <number of objects to scan>
> the "numa id" input does not take effect either.
>
> This patch makes the memcg deferred_split_queue per
> mem_cgroup_per_node, so that it conforms to the intended semantics.

This seems to be a correct fix to me, + Yang Shi, the original author of
commit 87eaceb3faa5.

>
> Reviewed-by: Lu Zhongjun <lu.zhongjun@zte.com.cn>
> Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> Cc: xu xin <xu.xin16@zte.com.cn>
> Cc: Yang Yang <yang.yang29@zte.com.cn>
> ---
>  include/linux/memcontrol.h |  7 +++----
>  mm/huge_memory.c           |  6 +++---
>  mm/memcontrol.c            | 11 +++++------
>  3 files changed, 11 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 394fd0a887ae..7282861d5a5d 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -130,6 +130,9 @@ struct mem_cgroup_per_node {
>          bool on_tree;
>          struct mem_cgroup *memcg;       /* Back pointer, we cannot */
>                                          /* use container_of */
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +        struct deferred_split deferred_split_queue;
> +#endif
>  };
>
>  struct mem_cgroup_threshold {
> @@ -327,10 +330,6 @@ struct mem_cgroup {
>          struct list_head event_list;
>          spinlock_t event_list_lock;
>
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -        struct deferred_split deferred_split_queue;
> -#endif
> -
>  #ifdef CONFIG_LRU_GEN_WALKS_MMU
>          /* per-memcg mm_struct list */
>          struct lru_gen_mm_list mm_list;
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 9859aa4f7553..338d071070a6 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -774,7 +774,7 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
>          struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
>
>          if (memcg)
> -                return &memcg->deferred_split_queue;
> +                return &memcg->nodeinfo[pgdat->node_id]->deferred_split_queue;
>          else
>                  return &pgdat->deferred_split_queue;
>  }
> @@ -3305,7 +3305,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
>
>  #ifdef CONFIG_MEMCG
>          if (sc->memcg)
> -                ds_queue = &sc->memcg->deferred_split_queue;
> +                ds_queue = &sc->memcg->nodeinfo[sc->nid]->deferred_split_queue;
>  #endif
>          return READ_ONCE(ds_queue->split_queue_len);
>  }
> @@ -3322,7 +3322,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
>
>  #ifdef CONFIG_MEMCG
>          if (sc->memcg)
> -                ds_queue = &sc->memcg->deferred_split_queue;
> +                ds_queue = &sc->memcg->nodeinfo[sc->nid]->deferred_split_queue;
>  #endif
>
>          spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index fabce2b50c69..cdf9f5fa3b8e 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -5445,7 +5445,11 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
>                  kfree(pn);
>                  return 1;
>          }
> -
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +        spin_lock_init(&pn->deferred_split_queue.split_queue_lock);
> +        INIT_LIST_HEAD(&pn->deferred_split_queue.split_queue);
> +        pn->deferred_split_queue.split_queue_len = 0;
> +#endif
>          lruvec_init(&pn->lruvec);
>          pn->memcg = memcg;
>
> @@ -5545,11 +5549,6 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
>                  for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
>                          memcg->cgwb_frn[i].done =
>                                  __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
> -#endif
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -        spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
> -        INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
> -        memcg->deferred_split_queue.split_queue_len = 0;
>  #endif
>          lru_gen_init_memcg(memcg);
>          return memcg;
> --
> 2.15.2
>

Thanks
Barry
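To make the behaviour change under "Impact 1" concrete, here is a minimal, standalone userspace C model; it is not kernel code, and all struct and function names in it are invented for illustration. It contrasts the old layout, one deferred split queue per memcg shared by every node (so the shrinker's nid argument cannot matter), with the per-node layout this patch introduces, where a count for node N reports only node N's backlog.

/*
 * Userspace model of the structural change (illustrative names only,
 * not kernel identifiers). Queue lengths are made up.
 */
#include <stdio.h>

#define MAX_NODES 2

struct split_queue_model {
        unsigned long len;              /* stands in for split_queue_len */
};

/* Before the patch: one queue per memcg, shared by all NUMA nodes. */
struct memcg_model_old {
        struct split_queue_model queue;
};

/* After the patch: one queue per memcg per node (mem_cgroup_per_node). */
struct memcg_model_new {
        struct split_queue_model node_queue[MAX_NODES];
};

/* Old count: nid is accepted but cannot influence the result. */
static unsigned long count_old(const struct memcg_model_old *memcg, int nid)
{
        (void)nid;
        return memcg->queue.len;
}

/* New count: only the requested node's backlog is reported. */
static unsigned long count_new(const struct memcg_model_new *memcg, int nid)
{
        return memcg->node_queue[nid].len;
}

int main(void)
{
        struct memcg_model_old before = { .queue = { .len = 30 } };
        struct memcg_model_new after = {
                .node_queue = { { .len = 10 }, { .len = 20 } },
        };

        for (int nid = 0; nid < MAX_NODES; nid++)
                printf("node %d: before=%lu after=%lu\n",
                       nid, count_old(&before, nid), count_new(&after, nid));
        return 0;
}

Run, the old model reports the same memcg-wide total for every node, while the new one reports each node's own queue length, which is the change kswapd's node-targeted shrink_slab() call relies on.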
On Sun, Apr 14, 2024 at 8:30 PM Barry Song <21cnbao@gmail.com> wrote:
>
> On Mon, Apr 15, 2024 at 3:11 PM <xu.xin16@zte.com.cn> wrote:
> >
> > From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> >
> > Since commit 87eaceb3faa5 ("mm: thp: make deferred split shrinker
> > memcg aware"), the THP deferred split queue is per memcg but not
> > per mem_cgroup_per_node. This has two impacts:
> >
> > Impact 1: kswapd reclaim
> > ========================
> > kswapd
> >   balance_pgdat
> >     kswapd_shrink_node
> >       shrink_node(pgdat, sc);
> >         shrink_node_memcgs(pgdat, sc);
> >           shrink_slab(sc->gfp_mask, pgdat->node_id, memcg...);
> > The "pgdat->node_id" parameter does not take effect for the THP
> > deferred_split_shrinker, because the deferred_split_queue of the
> > specified memcg is not per NUMA node but shared by all nodes.
> > We want to make the memcg THP deferred split shrinker aware of
> > node_id.
> >
> > Impact 2: thp-deferred_split shrinker debugfs interface
> > =======================================================
> > For the "count" file:
> >   <cgroup inode id> <objects on node 0> <objects on node 1>
> > the output is actually the sum over all NUMA nodes.
> > For the "scan" file:
> >   <cgroup inode id> <numa id> <number of objects to scan>
> > the "numa id" input does not take effect either.
> >
> > This patch makes the memcg deferred_split_queue per
> > mem_cgroup_per_node, so that it conforms to the intended semantics.

I used to have a similar patch before,
https://lore.kernel.org/linux-mm/1569968203-64647-1-git-send-email-yang.shi@linux.alibaba.com/

But it was somehow lost in discussion. I have no objection to this patch.
However, I was thinking about using list_lru for the deferred split queue,
but I didn't have time to look deeper. Maybe we should try that now?

>
> This seems to be a correct fix to me, + Yang Shi, the original author of
> commit 87eaceb3faa5.
>
> >
> > Reviewed-by: Lu Zhongjun <lu.zhongjun@zte.com.cn>
> > Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> > Cc: xu xin <xu.xin16@zte.com.cn>
> > Cc: Yang Yang <yang.yang29@zte.com.cn>
> > ---
> >  include/linux/memcontrol.h |  7 +++----
> >  mm/huge_memory.c           |  6 +++---
> >  mm/memcontrol.c            | 11 +++++------
> >  3 files changed, 11 insertions(+), 13 deletions(-)
> >
> > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> > index 394fd0a887ae..7282861d5a5d 100644
> > --- a/include/linux/memcontrol.h
> > +++ b/include/linux/memcontrol.h
> > @@ -130,6 +130,9 @@ struct mem_cgroup_per_node {
> >          bool on_tree;
> >          struct mem_cgroup *memcg;       /* Back pointer, we cannot */
> >                                          /* use container_of */
> > +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > +        struct deferred_split deferred_split_queue;
> > +#endif
> >  };
> >
> >  struct mem_cgroup_threshold {
> > @@ -327,10 +330,6 @@ struct mem_cgroup {
> >          struct list_head event_list;
> >          spinlock_t event_list_lock;
> >
> > -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > -        struct deferred_split deferred_split_queue;
> > -#endif
> > -
> >  #ifdef CONFIG_LRU_GEN_WALKS_MMU
> >          /* per-memcg mm_struct list */
> >          struct lru_gen_mm_list mm_list;
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 9859aa4f7553..338d071070a6 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -774,7 +774,7 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
> >          struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
> >
> >          if (memcg)
> > -                return &memcg->deferred_split_queue;
> > +                return &memcg->nodeinfo[pgdat->node_id]->deferred_split_queue;
> >          else
> >                  return &pgdat->deferred_split_queue;
> >  }
> > @@ -3305,7 +3305,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
> >
> >  #ifdef CONFIG_MEMCG
> >          if (sc->memcg)
> > -                ds_queue = &sc->memcg->deferred_split_queue;
> > +                ds_queue = &sc->memcg->nodeinfo[sc->nid]->deferred_split_queue;
> >  #endif
> >          return READ_ONCE(ds_queue->split_queue_len);
> >  }
> > @@ -3322,7 +3322,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
> >
> >  #ifdef CONFIG_MEMCG
> >          if (sc->memcg)
> > -                ds_queue = &sc->memcg->deferred_split_queue;
> > +                ds_queue = &sc->memcg->nodeinfo[sc->nid]->deferred_split_queue;
> >  #endif
> >
> >          spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
> > diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> > index fabce2b50c69..cdf9f5fa3b8e 100644
> > --- a/mm/memcontrol.c
> > +++ b/mm/memcontrol.c
> > @@ -5445,7 +5445,11 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
> >                  kfree(pn);
> >                  return 1;
> >          }
> > -
> > +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > +        spin_lock_init(&pn->deferred_split_queue.split_queue_lock);
> > +        INIT_LIST_HEAD(&pn->deferred_split_queue.split_queue);
> > +        pn->deferred_split_queue.split_queue_len = 0;
> > +#endif
> >          lruvec_init(&pn->lruvec);
> >          pn->memcg = memcg;
> >
> > @@ -5545,11 +5549,6 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
> >                  for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
> >                          memcg->cgwb_frn[i].done =
> >                                  __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
> > -#endif
> > -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > -        spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
> > -        INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
> > -        memcg->deferred_split_queue.split_queue_len = 0;
> >  #endif
> >          lru_gen_init_memcg(memcg);
> >          return memcg;
> > --
> > 2.15.2
> >
>
> Thanks
> Barry
>
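On the list_lru direction Yang Shi raises: the kernel's list_lru keeps its items in per-node (and, with memcg accounting enabled, per-memcg) lists and already offers per-node counting and walking, which is why building the deferred split queue on it could provide NUMA awareness without an open-coded queue in each mem_cgroup_per_node. The sketch below is only a userspace model of that shape, with made-up names; it is not the kernel list_lru API and ignores locking and memcg lifetime.

/*
 * Userspace model of a list_lru-like structure: items are kept on a
 * list per (memcg, node), with per-node count and walk helpers.
 * All identifiers here are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_MEMCGS 2
#define MAX_NODES  2

struct item {
        struct item *next;
};

struct fake_list_lru {
        struct item *heads[MAX_MEMCGS][MAX_NODES];
        unsigned long counts[MAX_MEMCGS][MAX_NODES];
};

static void lru_add(struct fake_list_lru *lru, int memcg, int nid,
                    struct item *it)
{
        it->next = lru->heads[memcg][nid];
        lru->heads[memcg][nid] = it;
        lru->counts[memcg][nid]++;
}

/* analogous to a per-node, per-memcg count_objects() */
static unsigned long lru_count_one(const struct fake_list_lru *lru,
                                   int memcg, int nid)
{
        return lru->counts[memcg][nid];
}

/* analogous to scan_objects(): process at most nr items on one node's list */
static unsigned long lru_walk_one(struct fake_list_lru *lru, int memcg,
                                  int nid, unsigned long nr)
{
        unsigned long done = 0;

        while (lru->heads[memcg][nid] && done < nr) {
                struct item *it = lru->heads[memcg][nid];

                lru->heads[memcg][nid] = it->next;
                lru->counts[memcg][nid]--;
                free(it);               /* stand-in for "split this THP" */
                done++;
        }
        return done;
}

int main(void)
{
        struct fake_list_lru lru = { 0 };

        for (int i = 0; i < 5; i++)
                lru_add(&lru, 0, i % MAX_NODES,
                        calloc(1, sizeof(struct item)));

        printf("memcg 0, node 0: %lu queued\n", lru_count_one(&lru, 0, 0));
        printf("memcg 0, node 1: %lu queued\n", lru_count_one(&lru, 0, 1));
        printf("scanned %lu objects on node 0\n", lru_walk_one(&lru, 0, 0, 2));
        return 0;
}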
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 394fd0a887ae..7282861d5a5d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -130,6 +130,9 @@ struct mem_cgroup_per_node {
         bool on_tree;
         struct mem_cgroup *memcg;       /* Back pointer, we cannot */
                                         /* use container_of */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        struct deferred_split deferred_split_queue;
+#endif
 };

 struct mem_cgroup_threshold {
@@ -327,10 +330,6 @@ struct mem_cgroup {
         struct list_head event_list;
         spinlock_t event_list_lock;

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        struct deferred_split deferred_split_queue;
-#endif
-
 #ifdef CONFIG_LRU_GEN_WALKS_MMU
         /* per-memcg mm_struct list */
         struct lru_gen_mm_list mm_list;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9859aa4f7553..338d071070a6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -774,7 +774,7 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
         struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

         if (memcg)
-                return &memcg->deferred_split_queue;
+                return &memcg->nodeinfo[pgdat->node_id]->deferred_split_queue;
         else
                 return &pgdat->deferred_split_queue;
 }
@@ -3305,7 +3305,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,

 #ifdef CONFIG_MEMCG
         if (sc->memcg)
-                ds_queue = &sc->memcg->deferred_split_queue;
+                ds_queue = &sc->memcg->nodeinfo[sc->nid]->deferred_split_queue;
 #endif
         return READ_ONCE(ds_queue->split_queue_len);
 }
@@ -3322,7 +3322,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,

 #ifdef CONFIG_MEMCG
         if (sc->memcg)
-                ds_queue = &sc->memcg->deferred_split_queue;
+                ds_queue = &sc->memcg->nodeinfo[sc->nid]->deferred_split_queue;
 #endif

         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fabce2b50c69..cdf9f5fa3b8e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5445,7 +5445,11 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
                 kfree(pn);
                 return 1;
         }
-
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        spin_lock_init(&pn->deferred_split_queue.split_queue_lock);
+        INIT_LIST_HEAD(&pn->deferred_split_queue.split_queue);
+        pn->deferred_split_queue.split_queue_len = 0;
+#endif
         lruvec_init(&pn->lruvec);
         pn->memcg = memcg;

@@ -5545,11 +5549,6 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
                 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
                         memcg->cgwb_frn[i].done =
                                 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
-        INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
-        memcg->deferred_split_queue.split_queue_len = 0;
 #endif
         lru_gen_init_memcg(memcg);
         return memcg;
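For "Impact 2", the shrinker debugfs "count" file prints one column per NUMA node for each cgroup. The toy program below (inode number and queue lengths are made up) shows how such a line reads before the patch, when every per-node column could only repeat the memcg-wide total, versus after it, when each column carries its own node's backlog.

/*
 * Illustration (not kernel code) of the debugfs "count" line format:
 * "<cgroup inode id> <objects on node 0> <objects on node 1>".
 * Values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned long ino = 12345;                      /* hypothetical cgroup inode id */
        unsigned long per_node[2] = { 10, 20 };         /* hypothetical per-node backlogs */
        unsigned long total = per_node[0] + per_node[1];

        printf("before: %lu %lu %lu\n", ino, total, total);
        printf("after:  %lu %lu %lu\n", ino, per_node[0], per_node[1]);
        return 0;
}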