Message ID | 20241206013512.2883617-5-chenridong@huaweicloud.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Some cleanup for memcg | expand |
On Fri, Dec 06, 2024 at 01:35:11AM +0000, Chen Ridong wrote: > From: Chen Ridong <chenridong@huawei.com> > > Factor out the '__refill_obj_stock' function to make the code more > cohesive. > > Signed-off-by: Chen Ridong <chenridong@huawei.com> > --- > mm/memcontrol.c | 31 ++++++++++++++++++------------- > 1 file changed, 18 insertions(+), 13 deletions(-) > > diff --git a/mm/memcontrol.c b/mm/memcontrol.c > index f977e0be1c04..0c9331d7b606 100644 > --- a/mm/memcontrol.c > +++ b/mm/memcontrol.c > @@ -2697,6 +2697,21 @@ void __memcg_kmem_uncharge_page(struct page *page, int order) > obj_cgroup_put(objcg); > } > > +/* If the cached_objcg was refilled, return true; otherwise, return false */ > +static bool __refill_obj_stock(struct memcg_stock_pcp *stock, > + struct obj_cgroup *objcg, struct obj_cgroup **old_objcg) > +{ > + if (READ_ONCE(stock->cached_objcg) != objcg) { Keep the above check in the calling functions and make this a void function. Also I think we need a better name. > + *old_objcg = drain_obj_stock(stock); > + obj_cgroup_get(objcg); > + stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) > + ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; > + WRITE_ONCE(stock->cached_objcg, objcg); > + return true; > + } > + return false; > +} > + > static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, > enum node_stat_item idx, int nr) > { > @@ -2713,12 +2728,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, > * accumulating over a page of vmstat data or when pgdat or idx > * changes. > */ > - if (READ_ONCE(stock->cached_objcg) != objcg) { > - old = drain_obj_stock(stock); > - obj_cgroup_get(objcg); > - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) > - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; > - WRITE_ONCE(stock->cached_objcg, objcg); > + if (__refill_obj_stock(stock, objcg, &old)) { > stock->cached_pgdat = pgdat; > } else if (stock->cached_pgdat != pgdat) { > /* Flush the existing cached vmstat data */ > @@ -2871,14 +2881,9 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, > local_lock_irqsave(&memcg_stock.stock_lock, flags); > > stock = this_cpu_ptr(&memcg_stock); > - if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ > - old = drain_obj_stock(stock); > - obj_cgroup_get(objcg); > - WRITE_ONCE(stock->cached_objcg, objcg); > - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) > - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; > + if (__refill_obj_stock(stock, objcg, &old)) > allow_uncharge = true; /* Allow uncharge when objcg changes */ > - } > + > stock->nr_bytes += nr_bytes; > > if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { > -- > 2.34.1 >
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f977e0be1c04..0c9331d7b606 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2697,6 +2697,21 @@ void __memcg_kmem_uncharge_page(struct page *page, int order) obj_cgroup_put(objcg); } +/* If the cached_objcg was refilled, return true; otherwise, return false */ +static bool __refill_obj_stock(struct memcg_stock_pcp *stock, + struct obj_cgroup *objcg, struct obj_cgroup **old_objcg) +{ + if (READ_ONCE(stock->cached_objcg) != objcg) { + *old_objcg = drain_obj_stock(stock); + obj_cgroup_get(objcg); + stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) + ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; + WRITE_ONCE(stock->cached_objcg, objcg); + return true; + } + return false; +} + static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, enum node_stat_item idx, int nr) { @@ -2713,12 +2728,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, * accumulating over a page of vmstat data or when pgdat or idx * changes. */ - if (READ_ONCE(stock->cached_objcg) != objcg) { - old = drain_obj_stock(stock); - obj_cgroup_get(objcg); - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; - WRITE_ONCE(stock->cached_objcg, objcg); + if (__refill_obj_stock(stock, objcg, &old)) { stock->cached_pgdat = pgdat; } else if (stock->cached_pgdat != pgdat) { /* Flush the existing cached vmstat data */ @@ -2871,14 +2881,9 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); - if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ - old = drain_obj_stock(stock); - obj_cgroup_get(objcg); - WRITE_ONCE(stock->cached_objcg, objcg); - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; + if (__refill_obj_stock(stock, objcg, &old)) allow_uncharge = true; /* Allow uncharge when objcg changes */ - } + stock->nr_bytes += nr_bytes; if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {