Message ID | 20240208013607.1731817-1-wangkefeng.wang@huawei.com (mailing list archive) |
---|---|
State | New |
Series | [v2] mm: compaction: refactor compact_node() |
On 2024/2/8 09:36, Kefeng Wang wrote:
> Refactor compact_node() to handle both proactive and synchronous memory
> compaction, which cleans up the code a bit.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

LGTM.

Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>

> ---
> v2:
> - drop proactive_compact_node() and add comments for compact_node(),
>   as suggested by Andrew
>
>  mm/compaction.c | 65 ++++++++++++++++---------------------------------
>  1 file changed, 21 insertions(+), 44 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index e63a4ee7e029..de882ecb61c5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2885,25 +2885,27 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 }
 
 /*
- * Compact all zones within a node till each zone's fragmentation score
- * reaches within proactive compaction thresholds (as determined by the
- * proactiveness tunable).
+ * compact_node() - compact all zones within a node
+ * @pgdat: The node page data
+ * @proactive: Whether the compaction is proactive
  *
- * It is possible that the function returns before reaching score targets
- * due to various back-off conditions, such as, contention on per-node or
- * per-zone locks.
+ * For proactive compaction, compact till each zone's fragmentation score
+ * reaches within proactive compaction thresholds (as determined by the
+ * proactiveness tunable), it is possible that the function returns before
+ * reaching score targets due to various back-off conditions, such as,
+ * contention on per-node or per-zone locks.
  */
-static void proactive_compact_node(pg_data_t *pgdat)
+static void compact_node(pg_data_t *pgdat, bool proactive)
 {
 	int zoneid;
 	struct zone *zone;
 	struct compact_control cc = {
 		.order = -1,
-		.mode = MIGRATE_SYNC_LIGHT,
+		.mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 		.whole_zone = true,
 		.gfp_mask = GFP_KERNEL,
-		.proactive_compaction = true,
+		.proactive_compaction = proactive,
 	};
 
 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
@@ -2915,41 +2917,16 @@ static void proactive_compact_node(pg_data_t *pgdat)
 
 		compact_zone(&cc, NULL);
 
-		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
-				     cc.total_migrate_scanned);
-		count_compact_events(KCOMPACTD_FREE_SCANNED,
-				     cc.total_free_scanned);
-	}
-}
-
-/* Compact all zones within a node */
-static void compact_node(int nid)
-{
-	pg_data_t *pgdat = NODE_DATA(nid);
-	int zoneid;
-	struct zone *zone;
-	struct compact_control cc = {
-		.order = -1,
-		.mode = MIGRATE_SYNC,
-		.ignore_skip_hint = true,
-		.whole_zone = true,
-		.gfp_mask = GFP_KERNEL,
-	};
-
-
-	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-
-		zone = &pgdat->node_zones[zoneid];
-		if (!populated_zone(zone))
-			continue;
-
-		cc.zone = zone;
-
-		compact_zone(&cc, NULL);
+		if (proactive) {
+			count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+					     cc.total_migrate_scanned);
+			count_compact_events(KCOMPACTD_FREE_SCANNED,
+					     cc.total_free_scanned);
+		}
 	}
 }
 
-/* Compact all nodes in the system */
+/* Compact all zones of all nodes in the system */
 static void compact_nodes(void)
 {
 	int nid;
@@ -2958,7 +2935,7 @@ static void compact_nodes(void)
 	lru_add_drain_all();
 
 	for_each_online_node(nid)
-		compact_node(nid);
+		compact_node(NODE_DATA(nid), false);
 }
 
 static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
@@ -3020,7 +2997,7 @@ static ssize_t compact_store(struct device *dev,
 		/* Flush pending updates to the LRU lists */
 		lru_add_drain_all();
 
-		compact_node(nid);
+		compact_node(NODE_DATA(nid), false);
 	}
 
 	return count;
@@ -3229,7 +3206,7 @@ static int kcompactd(void *p)
 		unsigned int prev_score, score;
 
 		prev_score = fragmentation_score_node(pgdat);
-		proactive_compact_node(pgdat);
+		compact_node(pgdat, true);
 		score = fragmentation_score_node(pgdat);
 		/*
 		 * Defer proactive compaction if the fragmentation
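For quick reference, the two call patterns of the unified helper, condensed from the diff above (surrounding context omitted; nid is whatever node id the caller already holds):

	/* explicit compaction (compact_memory sysctl / per-node sysfs): MIGRATE_SYNC, no stats */
	compact_node(NODE_DATA(nid), false);

	/* proactive compaction from kcompactd: MIGRATE_SYNC_LIGHT, KCOMPACTD_* scan events counted */
	compact_node(pgdat, true);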
Refactor compact_node() to handle both proactive and synchronous memory
compaction, which cleans up the code a bit.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
v2:
- drop proactive_compact_node() and add comments for compact_node(),
  as suggested by Andrew

 mm/compaction.c | 65 ++++++++++++++++---------------------------------
 1 file changed, 21 insertions(+), 44 deletions(-)
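As a usage note, the synchronous (non-proactive) path changed here is reached from userspace through the existing compaction trigger files. The sketch below is illustrative only: it assumes root privileges and a system where node0 exists, and simply writes "1" to the documented trigger files.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static void trigger(const char *path)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror(path);
			return;
		}
		if (write(fd, "1", 1) != 1)
			perror(path);
		close(fd);
	}

	int main(void)
	{
		/* compacts all nodes: sysctl write -> compact_nodes() -> compact_node(pgdat, false) */
		trigger("/proc/sys/vm/compact_memory");

		/* compacts a single node: compact_store() -> compact_node(NODE_DATA(0), false) */
		trigger("/sys/devices/system/node/node0/compact");

		return 0;
	}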