Message ID: 1563869295-25748-1-git-send-email-laoar.shao@gmail.com (mailing list archive)
State: New, archived
Series: mm/compaction: introduce a helper compact_zone_counters_init()
On Tue 23-07-19 04:08:15, Yafang Shao wrote:
> This is the follow-up of the
> commit "mm/compaction.c: clear total_{migrate,free}_scanned before scanning a new zone".
>
> These counters are used to track activities during compacting a zone,
> and they will be set to zero before compacting a new zone in all compact
> paths. Move all these common settings into compact_zone() for better
> management. A new helper compact_zone_counters_init() is introduced for
> this purpose.

The helper seems excessive a bit because we have a single call site but
other than that this is an improvement to the current fragile and
duplicated code.

I would just get rid of the helper and squash it to your previous patch
which Andrew already took to the mm tree.

> Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Mel Gorman <mgorman@techsingularity.net>
> Cc: Yafang Shao <shaoyafang@didiglobal.com>
> ---
>  mm/compaction.c | 28 ++++++++++++++--------------
>  1 file changed, 14 insertions(+), 14 deletions(-)
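Michal's "fragile and duplicated" remark is the crux of the change: when every caller must remember to reset the per-zone counters, one forgetful call site silently reports accumulated numbers. Here is a minimal, self-contained sketch of that failure mode (plain userspace C with invented names, not kernel code; scan_zone() stands in for compact_zone() and scan_stats for the counters in struct compact_control):

#include <stdio.h>

/* Toy stand-in for the per-zone counters in struct compact_control. */
struct scan_stats {
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
};

/* Toy stand-in for compact_zone(): accumulates into whatever state it is handed. */
static void scan_zone(struct scan_stats *st)
{
	st->total_migrate_scanned += 100;	/* pretend we scanned 100 migrate candidates */
	st->total_free_scanned += 200;		/* and 200 free pages */
}

int main(void)
{
	struct scan_stats st = { 0, 0 };

	/* Caller A remembers to reset the counters before each zone. */
	for (int zone = 0; zone < 2; zone++) {
		st.total_migrate_scanned = 0;
		st.total_free_scanned = 0;
		scan_zone(&st);
		printf("caller A, zone %d: migrate_scanned=%lu\n",
		       zone, st.total_migrate_scanned);
	}

	/* Caller B forgets, so its per-zone numbers are inflated by leftovers. */
	for (int zone = 0; zone < 2; zone++) {
		scan_zone(&st);
		printf("caller B, zone %d: migrate_scanned=%lu\n",
		       zone, st.total_migrate_scanned);
	}

	/* Resetting inside scan_zone() itself would make caller B's mistake impossible. */
	return 0;
}

Resetting inside the one function that every path calls, which is what the squashed patch does in compact_zone(), removes the duplicated boilerplate along with the opportunity to forget it.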
On Tue, Jul 23, 2019 at 4:12 PM Michal Hocko <mhocko@suse.com> wrote:
>
> On Tue 23-07-19 04:08:15, Yafang Shao wrote:
> > This is the follow-up of the
> > commit "mm/compaction.c: clear total_{migrate,free}_scanned before scanning a new zone".
> >
> > These counters are used to track activities during compacting a zone,
> > and they will be set to zero before compacting a new zone in all compact
> > paths. Move all these common settings into compact_zone() for better
> > management. A new helper compact_zone_counters_init() is introduced for
> > this purpose.
>
> The helper seems excessive a bit because we have a single call site but
> other than that this is an improvement to the current fragile and
> duplicated code.
>

Understood.

> I would just get rid of the helper and squash it to your previous patch
> which Andrew already took to the mm tree.
>

I appreciate it.

Thanks
Yafang

> > Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
> > Cc: Michal Hocko <mhocko@suse.com>
> > Cc: Mel Gorman <mgorman@techsingularity.net>
> > Cc: Yafang Shao <shaoyafang@didiglobal.com>
> > ---
> >  mm/compaction.c | 28 ++++++++++++++--------------
> >  1 file changed, 14 insertions(+), 14 deletions(-)
>
> --
> Michal Hocko
> SUSE Labs
On Tue, 23 Jul 2019 10:12:18 +0200 Michal Hocko <mhocko@suse.com> wrote:

> On Tue 23-07-19 04:08:15, Yafang Shao wrote:
> > This is the follow-up of the
> > commit "mm/compaction.c: clear total_{migrate,free}_scanned before scanning a new zone".
> >
> > These counters are used to track activities during compacting a zone,
> > and they will be set to zero before compacting a new zone in all compact
> > paths. Move all these common settings into compact_zone() for better
> > management. A new helper compact_zone_counters_init() is introduced for
> > this purpose.
>
> The helper seems excessive a bit because we have a single call site but
> other than that this is an improvement to the current fragile and
> duplicated code.
>
> I would just get rid of the helper and squash it to your previous patch
> which Andrew already took to the mm tree.

--- a/mm/compaction.c~mm-compaction-clear-total_migratefree_scanned-before-scanning-a-new-zone-fix-fix
+++ a/mm/compaction.c
@@ -2068,19 +2068,6 @@ bool compaction_zonelist_suitable(struct
 		return false;
 }
 
-
-/*
- * Bellow counters are used to track activities during compacting a zone.
- * Before compacting a new zone, we should init these counters first.
- */
-static void compact_zone_counters_init(struct compact_control *cc)
-{
-	cc->total_migrate_scanned = 0;
-	cc->total_free_scanned = 0;
-	cc->nr_migratepages = 0;
-	cc->nr_freepages = 0;
-}
-
 static enum compact_result
 compact_zone(struct compact_control *cc, struct capture_control *capc)
 {
@@ -2091,7 +2078,15 @@ compact_zone(struct compact_control *cc,
 	const bool sync = cc->mode != MIGRATE_ASYNC;
 	bool update_cached;
 
-	compact_zone_counters_init(cc);
+	/*
+	 * These counters track activities during zone compaction. Initialize
+	 * them before compacting a new zone.
+	 */
+	cc->total_migrate_scanned = 0;
+	cc->total_free_scanned = 0;
+	cc->nr_migratepages = 0;
+	cc->nr_freepages = 0;
+
 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
On 7/23/19 11:40 PM, Andrew Morton wrote:
> On Tue, 23 Jul 2019 10:12:18 +0200 Michal Hocko <mhocko@suse.com> wrote:
>
>> On Tue 23-07-19 04:08:15, Yafang Shao wrote:
>>> This is the follow-up of the
>>> commit "mm/compaction.c: clear total_{migrate,free}_scanned before scanning a new zone".
>>>
>>> These counters are used to track activities during compacting a zone,
>>> and they will be set to zero before compacting a new zone in all compact
>>> paths. Move all these common settings into compact_zone() for better
>>> management. A new helper compact_zone_counters_init() is introduced for
>>> this purpose.
>>
>> The helper seems excessive a bit because we have a single call site but
>> other than that this is an improvement to the current fragile and
>> duplicated code.
>>
>> I would just get rid of the helper and squash it to your previous patch
>> which Andrew already took to the mm tree.

I have squashed everything locally, and for the result:

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Also, why not squash some more?

----8<----
diff --git a/mm/compaction.c b/mm/compaction.c
index dcbe95fa8e28..cfe1457352f9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2083,6 +2083,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 	cc->total_free_scanned = 0;
 	cc->nr_migratepages = 0;
 	cc->nr_freepages = 0;
+	INIT_LIST_HEAD(&cc->freepages);
+	INIT_LIST_HEAD(&cc->migratepages);
 
 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
@@ -2307,8 +2309,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 	if (capture)
 		current->capture_control = &capc;
 
-	INIT_LIST_HEAD(&cc.freepages);
-	INIT_LIST_HEAD(&cc.migratepages);
 
 	ret = compact_zone(&cc, &capc);
 
@@ -2424,8 +2424,6 @@ static void compact_node(int nid)
 			continue;
 
 		cc.zone = zone;
-		INIT_LIST_HEAD(&cc.freepages);
-		INIT_LIST_HEAD(&cc.migratepages);
 
 		compact_zone(&cc, NULL);
 
@@ -2551,8 +2549,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 			continue;
 
 		cc.zone = zone;
-		INIT_LIST_HEAD(&cc.freepages);
-		INIT_LIST_HEAD(&cc.migratepages);
 
 		if (kthread_should_stop())
 			return;
On Wed, 24 Jul 2019 15:35:12 +0200 Vlastimil Babka <vbabka@suse.cz> wrote:

> On 7/23/19 11:40 PM, Andrew Morton wrote:
> > On Tue, 23 Jul 2019 10:12:18 +0200 Michal Hocko <mhocko@suse.com> wrote:
> >
> >> On Tue 23-07-19 04:08:15, Yafang Shao wrote:
> >>> This is the follow-up of the
> >>> commit "mm/compaction.c: clear total_{migrate,free}_scanned before scanning a new zone".
> >>>
> >>> These counters are used to track activities during compacting a zone,
> >>> and they will be set to zero before compacting a new zone in all compact
> >>> paths. Move all these common settings into compact_zone() for better
> >>> management. A new helper compact_zone_counters_init() is introduced for
> >>> this purpose.
> >>
> >> The helper seems excessive a bit because we have a single call site but
> >> other than that this is an improvement to the current fragile and
> >> duplicated code.
> >>
> >> I would just get rid of the helper and squash it to your previous patch
> >> which Andrew already took to the mm tree.
>
> I have squashed everything locally, and for the result:
>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
>
> Also, why not squash some more?

An SOB would be nice..

And this?

--- a/mm/compaction.c~mm-compaction-clear-total_migratefree_scanned-before-scanning-a-new-zone-fix-2-fix
+++ a/mm/compaction.c
@@ -2551,10 +2551,10 @@ static void kcompactd_do_work(pg_data_t
 						COMPACT_CONTINUE)
 			continue;
 
-		cc.zone = zone;
-
 		if (kthread_should_stop())
 			return;
+
+		cc.zone = zone;
 		status = compact_zone(&cc, NULL);
 
 		if (status == COMPACT_SUCCESS) {
On Wed, 24 Jul 2019 17:19:45 -0700 Andrew Morton <akpm@linux-foundation.org> wrote:
> And this?
Here's the current rolled-up state of this patch.
mm/compaction.c | 35 +++++++++++++----------------------
1 file changed, 13 insertions(+), 22 deletions(-)
--- a/mm/compaction.c~mm-compaction-clear-total_migratefree_scanned-before-scanning-a-new-zone
+++ a/mm/compaction.c
@@ -2078,6 +2078,17 @@ compact_zone(struct compact_control *cc,
 	const bool sync = cc->mode != MIGRATE_ASYNC;
 	bool update_cached;
 
+	/*
+	 * These counters track activities during zone compaction. Initialize
+	 * them before compacting a new zone.
+	 */
+	cc->total_migrate_scanned = 0;
+	cc->total_free_scanned = 0;
+	cc->nr_migratepages = 0;
+	cc->nr_freepages = 0;
+	INIT_LIST_HEAD(&cc->freepages);
+	INIT_LIST_HEAD(&cc->migratepages);
+
 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
@@ -2281,10 +2292,6 @@ static enum compact_result compact_zone_
 {
 	enum compact_result ret;
 	struct compact_control cc = {
-		.nr_freepages = 0,
-		.nr_migratepages = 0,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.order = order,
 		.search_order = order,
 		.gfp_mask = gfp_mask,
@@ -2305,8 +2312,6 @@ static enum compact_result compact_zone_
 	if (capture)
 		current->capture_control = &capc;
 
-	INIT_LIST_HEAD(&cc.freepages);
-	INIT_LIST_HEAD(&cc.migratepages);
 
 	ret = compact_zone(&cc, &capc);
 
@@ -2408,8 +2413,6 @@ static void compact_node(int nid)
 	struct zone *zone;
 	struct compact_control cc = {
 		.order = -1,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 		.whole_zone = true,
@@ -2423,11 +2426,7 @@ static void compact_node(int nid)
 		if (!populated_zone(zone))
 			continue;
 
-		cc.nr_freepages = 0;
-		cc.nr_migratepages = 0;
 		cc.zone = zone;
-		INIT_LIST_HEAD(&cc.freepages);
-		INIT_LIST_HEAD(&cc.migratepages);
 
 		compact_zone(&cc, NULL);
 
@@ -2529,8 +2528,6 @@ static void kcompactd_do_work(pg_data_t
 	struct compact_control cc = {
 		.order = pgdat->kcompactd_max_order,
 		.search_order = pgdat->kcompactd_max_order,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.classzone_idx = pgdat->kcompactd_classzone_idx,
 		.mode = MIGRATE_SYNC_LIGHT,
 		.ignore_skip_hint = false,
@@ -2554,16 +2551,10 @@ static void kcompactd_do_work(pg_data_t
 						COMPACT_CONTINUE)
 			continue;
 
-		cc.nr_freepages = 0;
-		cc.nr_migratepages = 0;
-		cc.total_migrate_scanned = 0;
-		cc.total_free_scanned = 0;
-		cc.zone = zone;
-		INIT_LIST_HEAD(&cc.freepages);
-		INIT_LIST_HEAD(&cc.migratepages);
-
 		if (kthread_should_stop())
 			return;
+
+		cc.zone = zone;
 		status = compact_zone(&cc, NULL);
 
 		if (status == COMPACT_SUCCESS) {
On 7/25/19 2:25 AM, Andrew Morton wrote:
> On Wed, 24 Jul 2019 17:19:45 -0700 Andrew Morton <akpm@linux-foundation.org> wrote:
>
>> And this?

Makes sense.

> Here's the current rolled-up state of this patch.

Looks good, thanks.
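With Vlastimil's extra squash and Andrew's reordering folded in, the net effect of the rolled-up patch is that compact_zone() alone re-initializes all per-run state. Below is a minimal, self-contained mirror of that end state in plain userspace C (not kernel code: struct list_head, INIT_LIST_HEAD() and the pared-down struct compact_control are local stand-ins carrying only the fields touched here, and compact_zone_entry() is an invented name for the top of compact_zone()):

#include <stdio.h>

/* Local stand-ins so the sketch builds outside the kernel tree. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/* Only the fields re-initialized by the rolled-up patch are modelled. */
struct compact_control {
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned long nr_migratepages;
	unsigned long nr_freepages;
	struct list_head freepages;
	struct list_head migratepages;
};

/*
 * Mirrors the entry of compact_zone() after the squash: every per-run
 * field is reset here, so compact_zone_order(), compact_node() and
 * kcompactd_do_work() no longer carry their own copies of this code.
 */
static void compact_zone_entry(struct compact_control *cc)
{
	cc->total_migrate_scanned = 0;
	cc->total_free_scanned = 0;
	cc->nr_migratepages = 0;
	cc->nr_freepages = 0;
	INIT_LIST_HEAD(&cc->freepages);
	INIT_LIST_HEAD(&cc->migratepages);
}

int main(void)
{
	/* Stale state left over from a previous (hypothetical) zone. */
	struct compact_control cc = { .nr_freepages = 42 };

	compact_zone_entry(&cc);
	printf("nr_freepages after re-init: %lu\n", cc.nr_freepages);
	printf("freepages list empty: %d\n", cc.freepages.next == &cc.freepages);
	return 0;
}

Whatever a caller leaves behind, the counters come out zeroed and the lists empty before the next zone is scanned.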
diff --git a/mm/compaction.c b/mm/compaction.c
index a109b45..356348b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2065,6 +2065,19 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 	return false;
 }
 
+
+/*
+ * Bellow counters are used to track activities during compacting a zone.
+ * Before compacting a new zone, we should init these counters first.
+ */
+static void compact_zone_counters_init(struct compact_control *cc)
+{
+	cc->total_migrate_scanned = 0;
+	cc->total_free_scanned = 0;
+	cc->nr_migratepages = 0;
+	cc->nr_freepages = 0;
+}
+
 static enum compact_result
 compact_zone(struct compact_control *cc, struct capture_control *capc)
 {
@@ -2075,6 +2088,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 	const bool sync = cc->mode != MIGRATE_ASYNC;
 	bool update_cached;
 
+	compact_zone_counters_init(cc);
 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
@@ -2278,10 +2292,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 {
 	enum compact_result ret;
 	struct compact_control cc = {
-		.nr_freepages = 0,
-		.nr_migratepages = 0,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.order = order,
 		.search_order = order,
 		.gfp_mask = gfp_mask,
@@ -2418,10 +2428,6 @@ static void compact_node(int nid)
 		if (!populated_zone(zone))
 			continue;
 
-		cc.nr_freepages = 0;
-		cc.nr_migratepages = 0;
-		cc.total_migrate_scanned = 0;
-		cc.total_free_scanned = 0;
 		cc.zone = zone;
 		INIT_LIST_HEAD(&cc.freepages);
 		INIT_LIST_HEAD(&cc.migratepages);
@@ -2526,8 +2532,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 	struct compact_control cc = {
 		.order = pgdat->kcompactd_max_order,
 		.search_order = pgdat->kcompactd_max_order,
-		.total_migrate_scanned = 0,
-		.total_free_scanned = 0,
 		.classzone_idx = pgdat->kcompactd_classzone_idx,
 		.mode = MIGRATE_SYNC_LIGHT,
 		.ignore_skip_hint = false,
@@ -2551,10 +2555,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 						COMPACT_CONTINUE)
 			continue;
 
-		cc.nr_freepages = 0;
-		cc.nr_migratepages = 0;
-		cc.total_migrate_scanned = 0;
-		cc.total_free_scanned = 0;
 		cc.zone = zone;
 		INIT_LIST_HEAD(&cc.freepages);
 		INIT_LIST_HEAD(&cc.migratepages);
This is the follow-up of the
commit "mm/compaction.c: clear total_{migrate,free}_scanned before scanning a new zone".

These counters are used to track activities during compacting a zone,
and they will be set to zero before compacting a new zone in all compact
paths. Move all these common settings into compact_zone() for better
management. A new helper compact_zone_counters_init() is introduced for
this purpose.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Yafang Shao <shaoyafang@didiglobal.com>
---
 mm/compaction.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)