Message ID | 20220622082513.467538-10-aneesh.kumar@linux.ibm.com |
---|---|
State | New |
Series | mm/demotion: Memory tiers and demotion |
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes: > From: Jagdish Gediya <jvgediya@linux.ibm.com> [...] > -static struct page *alloc_demote_page(struct page *page, unsigned long node) > +static struct page *alloc_demote_page(struct page *page, unsigned long private) > { > - struct migration_target_control mtc = { > - /* > - * Allocate from 'node', or fail quickly and quietly. > - * When this happens, 'page' will likely just be discarded > - * instead of migrated. > - */ > - .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | > - __GFP_THISNODE | __GFP_NOWARN | > - __GFP_NOMEMALLOC | GFP_NOWAIT, > - .nid = node > - }; > + struct page *target_page; > + nodemask_t *allowed_mask; > + struct migration_target_control *mtc; > + > + mtc = (struct migration_target_control *)private; > + > + allowed_mask = mtc->nmask; > + /* > + * make sure we allocate from the target node first also trying to > + * reclaim pages from the target node via kswapd if we are low on > + * free memory on target node. If we don't do this and if we have low > + * free memory on the target memtier, we would start allocating pages > + * from higher memory tiers without even forcing a demotion of cold > + * pages from the target memtier. This can result in the kernel placing > + * hotpages in higher memory tiers. > + */ > + mtc->nmask = NULL; > + mtc->gfp_mask |= __GFP_THISNODE; > + target_page = alloc_migration_target(page, (unsigned long)&mtc); I finally managed to get a system setup to start testing some of this out. However it quickly crashed due to the bad pointer in the above call - you need mtc not &mtc here. > + if (target_page) > + return target_page; > + > + mtc->gfp_mask &= ~__GFP_THISNODE; > + mtc->nmask = allowed_mask; > > return alloc_migration_target(page, (unsigned long)&mtc); And here. > } > @@ -1487,6 +1500,19 @@ static unsigned int demote_page_list(struct list_head *demote_pages, > { > int target_nid = next_demotion_node(pgdat->node_id); > unsigned int nr_succeeded; > + nodemask_t allowed_mask; > + > + struct migration_target_control mtc = { > + /* > + * Allocate from 'node', or fail quickly and quietly. > + * When this happens, 'page' will likely just be discarded > + * instead of migrated. > + */ > + .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN | > + __GFP_NOMEMALLOC | GFP_NOWAIT, > + .nid = target_nid, > + .nmask = &allowed_mask > + }; > > if (list_empty(demote_pages)) > return 0; > @@ -1494,10 +1520,12 @@ static unsigned int demote_page_list(struct list_head *demote_pages, > if (target_nid == NUMA_NO_NODE) > return 0; > > + node_get_allowed_targets(pgdat, &allowed_mask); > + > /* Demotion ignores all cpuset and mempolicy settings */ > migrate_pages(demote_pages, alloc_demote_page, NULL, > - target_nid, MIGRATE_ASYNC, MR_DEMOTION, > - &nr_succeeded); > + (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION, > + &nr_succeeded); > > if (current_is_kswapd()) > __count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
Alistair Popple <apopple@nvidia.com> writes:

> "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
>
>> From: Jagdish Gediya <jvgediya@linux.ibm.com>
>
> [...]
>
>> -static struct page *alloc_demote_page(struct page *page, unsigned long node)
>> +static struct page *alloc_demote_page(struct page *page, unsigned long private)
>>  {
>> -	struct migration_target_control mtc = {
>> -		/*
>> -		 * Allocate from 'node', or fail quickly and quietly.
>> -		 * When this happens, 'page' will likely just be discarded
>> -		 * instead of migrated.
>> -		 */
>> -		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
>> -			    __GFP_THISNODE | __GFP_NOWARN |
>> -			    __GFP_NOMEMALLOC | GFP_NOWAIT,
>> -		.nid = node
>> -	};
>> +	struct page *target_page;
>> +	nodemask_t *allowed_mask;
>> +	struct migration_target_control *mtc;
>> +
>> +	mtc = (struct migration_target_control *)private;
>> +
>> +	allowed_mask = mtc->nmask;
>> +	/*
>> +	 * Make sure we allocate from the target node first, also trying to
>> +	 * reclaim pages from the target node via kswapd if we are low on
>> +	 * free memory on the target node. If we don't do this and we have low
>> +	 * free memory on the target memtier, we would start allocating pages
>> +	 * from higher memory tiers without even forcing a demotion of cold
>> +	 * pages from the target memtier. This can result in the kernel placing
>> +	 * hot pages in higher memory tiers.
>> +	 */
>> +	mtc->nmask = NULL;
>> +	mtc->gfp_mask |= __GFP_THISNODE;
>> +	target_page = alloc_migration_target(page, (unsigned long)&mtc);
>
> I finally managed to get a system set up to start testing some of this
> out. However, it quickly crashed due to the bad pointer in the above call
> - you need mtc, not &mtc, here.

I remember fixing that during earlier testing; I guess I missed copying
the change from my test tree to my development tree. Thanks for testing
this. I have now also tested the complete series with the above-suggested
changes and made sure we are indeed doing demotion by looking at
/proc/vmstat:pgdemote_kswapd/pgdemote_direct.

>
>> +	if (target_page)
>> +		return target_page;
>> +
>> +	mtc->gfp_mask &= ~__GFP_THISNODE;
>> +	mtc->nmask = allowed_mask;
>>
>>  	return alloc_migration_target(page, (unsigned long)&mtc);
>
> And here.
>

I will fold these changes in and send a v8 after waiting for review
feedback from others.

-aneesh
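The verification step described above can be scripted. A minimal userspace sketch that dumps the demotion counters from /proc/vmstat: it prints every counter whose name starts with pgdemote_, which covers the pgdemote_kswapd and pgdemote_direct fields named in the discussion:

```c
/*
 * Minimal sketch: print the pgdemote_* counters from /proc/vmstat to
 * confirm that demotion is actually happening, as described above.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Matches pgdemote_kswapd and pgdemote_direct. */
		if (strncmp(line, "pgdemote_", strlen("pgdemote_")) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```

Running it before and after applying memory pressure should show the counters increasing if pages are being demoted.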
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes: > Alistair Popple <apopple@nvidia.com> writes: > >> "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes: >> >>> From: Jagdish Gediya <jvgediya@linux.ibm.com> >> >> [...] >> >>> -static struct page *alloc_demote_page(struct page *page, unsigned long node) >>> +static struct page *alloc_demote_page(struct page *page, unsigned long private) >>> { >>> - struct migration_target_control mtc = { >>> - /* >>> - * Allocate from 'node', or fail quickly and quietly. >>> - * When this happens, 'page' will likely just be discarded >>> - * instead of migrated. >>> - */ >>> - .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | >>> - __GFP_THISNODE | __GFP_NOWARN | >>> - __GFP_NOMEMALLOC | GFP_NOWAIT, >>> - .nid = node >>> - }; >>> + struct page *target_page; >>> + nodemask_t *allowed_mask; >>> + struct migration_target_control *mtc; >>> + >>> + mtc = (struct migration_target_control *)private; >>> + >>> + allowed_mask = mtc->nmask; >>> + /* >>> + * make sure we allocate from the target node first also trying to >>> + * reclaim pages from the target node via kswapd if we are low on >>> + * free memory on target node. If we don't do this and if we have low >>> + * free memory on the target memtier, we would start allocating pages >>> + * from higher memory tiers without even forcing a demotion of cold >>> + * pages from the target memtier. This can result in the kernel placing >>> + * hotpages in higher memory tiers. >>> + */ >>> + mtc->nmask = NULL; >>> + mtc->gfp_mask |= __GFP_THISNODE; >>> + target_page = alloc_migration_target(page, (unsigned long)&mtc); >> >> I finally managed to get a system setup to start testing some of this >> out. However it quickly crashed due to the bad pointer in the above call >> - you need mtc not &mtc here. > > I remember fixing that during earlier testing. I guess I missed to copy > the change from test to my development. Thanks for testing this. I have > now also tested the complete series with the above-suggested changes and did > make sure we are indeed doing demotion by looking at > /proc/vmstat:pgdemote_kswapd/pgdemote_direct No worries. I'm still testing but the early results are looking really promising for some of our use cases so thanks for picking up this work. - Alistair >> >>> + if (target_page) >>> + return target_page; >>> + >>> + mtc->gfp_mask &= ~__GFP_THISNODE; >>> + mtc->nmask = allowed_mask; >>> >>> return alloc_migration_target(page, (unsigned long)&mtc); >> >> And here. >> > > I will fold this changes in and send a v8 after waiting for review > feedback from others. > > -aneesh
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 705b63ee31d5..335d21a30b2c 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -3,11 +3,12 @@
 #define _LINUX_MEMORY_TIERS_H
 
 #include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/mmzone.h>
 
 #ifdef CONFIG_NUMA
 
 #include <linux/device.h>
-#include <linux/nodemask.h>
 
 #define MEMORY_TIER_HBM_GPU	300
 #define MEMORY_TIER_DRAM	200
@@ -20,18 +21,25 @@ struct memory_tier {
 	struct list_head list;
 	struct device dev;
 	nodemask_t nodelist;
+	nodemask_t lower_tier_mask;
 };
 
 extern bool numa_demotion_enabled;
 int node_create_and_set_memory_tier(int node, int tier);
 #ifdef CONFIG_MIGRATION
 int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
 #else
 static inline int next_demotion_node(int node)
 {
 	return NUMA_NO_NODE;
 }
-#endif
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+	*targets = NODE_MASK_NONE;
+}
+#endif /* CONFIG_MIGRATION */
 
 int node_get_memory_tier_id(int node);
 int node_update_memory_tier(int node, int tier);
 struct memory_tier *node_get_memory_tier(int node);
@@ -49,5 +57,10 @@ static inline int next_demotion_node(int node)
 {
 	return NUMA_NO_NODE;
 }
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+	*targets = NODE_MASK_NONE;
+}
 #endif	/* CONFIG_NUMA */
 #endif	/* _LINUX_MEMORY_TIERS_H */
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 6a2476faf13a..aecce987df7c 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -374,6 +374,24 @@ void node_put_memory_tier(struct memory_tier *memtier)
 }
 
 #ifdef CONFIG_MIGRATION
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+	struct memory_tier *memtier;
+
+	/*
+	 * pg_data_t.memtier updates include a synchronize_rcu(),
+	 * which ensures that we either find NULL or a valid memtier
+	 * in NODE_DATA. Protect the access via rcu_read_lock().
+	 */
+	rcu_read_lock();
+	memtier = rcu_dereference(pgdat->memtier);
+	if (memtier)
+		*targets = memtier->lower_tier_mask;
+	else
+		*targets = NODE_MASK_NONE;
+	rcu_read_unlock();
+}
+
 /**
  * next_demotion_node() - Get the next node in the demotion path
  * @node: The starting node to lookup the next node
@@ -422,10 +440,19 @@ int next_demotion_node(int node)
 /* Disable reclaim-based migration. */
 static void __disable_all_migrate_targets(void)
 {
+	struct memory_tier *memtier;
 	int node;
 
-	for_each_node_state(node, N_MEMORY)
+	for_each_node_state(node, N_MEMORY) {
 		node_demotion[node].preferred = NODE_MASK_NONE;
+		/*
+		 * We are holding memory_tier_lock; it is safe
+		 * to access pgdat->memtier.
+		 */
+		memtier = rcu_dereference_check(NODE_DATA(node)->memtier,
+						lockdep_is_held(&memory_tier_lock));
+		memtier->lower_tier_mask = NODE_MASK_NONE;
+	}
 }
 
 static void disable_all_migrate_targets(void)
@@ -455,10 +482,26 @@ static void establish_migration_targets(void)
 	struct demotion_nodes *nd;
 	int target = NUMA_NO_NODE, node;
 	int distance, best_distance;
-	nodemask_t used;
-
-	if (!node_demotion || !IS_ENABLED(CONFIG_MIGRATION))
-		return;
+	nodemask_t used, lower_tier = NODE_MASK_NONE;
+
+	if (!node_demotion || !IS_ENABLED(CONFIG_MIGRATION)) {
+
+		for_each_node_state(node, N_MEMORY) {
+			/*
+			 * We are holding memory_tier_lock; it is safe
+			 * to access pgdat->memtier.
+			 */
+			memtier = rcu_dereference_check(NODE_DATA(node)->memtier,
+							lockdep_is_held(&memory_tier_lock));
+			memtier->lower_tier_mask = NODE_MASK_NONE;
+		}
+		/*
+		 * Wait for the read side to work with the old values
+		 * or see the updated NODE_MASK_NONE.
+		 */
+		synchronize_rcu();
+		goto build_lower_tier_mask;
+	}
 
 	disable_all_migrate_targets();
@@ -501,6 +544,29 @@ static void establish_migration_targets(void)
 			}
 		} while (1);
 	}
+build_lower_tier_mask:
+	/*
+	 * Now build the lower_tier mask for each memory tier, collecting
+	 * the node mask from all memory tiers below it. This allows us to
+	 * fall back demotion page allocation to a set of nodes that is
+	 * closer to the above-selected preferred node.
+	 */
+	list_for_each_entry(memtier, &memory_tiers, list)
+		nodes_or(lower_tier, lower_tier, memtier->nodelist);
+	/*
+	 * Remove nodes not yet in N_MEMORY.
+	 */
+	nodes_and(lower_tier, node_states[N_MEMORY], lower_tier);
+
+	list_for_each_entry(memtier, &memory_tiers, list) {
+		/*
+		 * Keep removing the current tier from the lower_tier nodes.
+		 * This removes all nodes in the current and higher memory
+		 * tiers from the lower_tier mask.
+		 */
+		nodes_andnot(lower_tier, lower_tier, memtier->nodelist);
+		memtier->lower_tier_mask = lower_tier;
+	}
 }
 
 static unsigned int default_memtier = DEFAULT_MEMORY_TIER;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3a8f78277f99..2b213248effa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1460,19 +1460,32 @@ static void folio_check_dirty_writeback(struct folio *folio,
 	mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
 }
 
-static struct page *alloc_demote_page(struct page *page, unsigned long node)
+static struct page *alloc_demote_page(struct page *page, unsigned long private)
 {
-	struct migration_target_control mtc = {
-		/*
-		 * Allocate from 'node', or fail quickly and quietly.
-		 * When this happens, 'page' will likely just be discarded
-		 * instead of migrated.
-		 */
-		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
-			    __GFP_THISNODE | __GFP_NOWARN |
-			    __GFP_NOMEMALLOC | GFP_NOWAIT,
-		.nid = node
-	};
+	struct page *target_page;
+	nodemask_t *allowed_mask;
+	struct migration_target_control *mtc;
+
+	mtc = (struct migration_target_control *)private;
+
+	allowed_mask = mtc->nmask;
+	/*
+	 * Make sure we allocate from the target node first, also trying to
+	 * reclaim pages from the target node via kswapd if we are low on
+	 * free memory on the target node. If we don't do this and we have low
+	 * free memory on the target memtier, we would start allocating pages
+	 * from higher memory tiers without even forcing a demotion of cold
+	 * pages from the target memtier. This can result in the kernel placing
+	 * hot pages in higher memory tiers.
+	 */
+	mtc->nmask = NULL;
+	mtc->gfp_mask |= __GFP_THISNODE;
+	target_page = alloc_migration_target(page, (unsigned long)&mtc);
+	if (target_page)
+		return target_page;
+
+	mtc->gfp_mask &= ~__GFP_THISNODE;
+	mtc->nmask = allowed_mask;
 
 	return alloc_migration_target(page, (unsigned long)&mtc);
 }
@@ -1487,6 +1500,19 @@ static unsigned int demote_page_list(struct list_head *demote_pages,
 {
 	int target_nid = next_demotion_node(pgdat->node_id);
 	unsigned int nr_succeeded;
+	nodemask_t allowed_mask;
+
+	struct migration_target_control mtc = {
+		/*
+		 * Allocate from 'node', or fail quickly and quietly.
+		 * When this happens, 'page' will likely just be discarded
+		 * instead of migrated.
+		 */
+		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
+			__GFP_NOMEMALLOC | GFP_NOWAIT,
+		.nid = target_nid,
+		.nmask = &allowed_mask
+	};
 
 	if (list_empty(demote_pages))
 		return 0;
@@ -1494,10 +1520,12 @@ static unsigned int demote_page_list(struct list_head *demote_pages,
 	if (target_nid == NUMA_NO_NODE)
 		return 0;
 
+	node_get_allowed_targets(pgdat, &allowed_mask);
+
 	/* Demotion ignores all cpuset and mempolicy settings */
 	migrate_pages(demote_pages, alloc_demote_page, NULL,
-		      target_nid, MIGRATE_ASYNC, MR_DEMOTION,
-		      &nr_succeeded);
+		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
+		      &nr_succeeded);
 
 	if (current_is_kswapd())
 		__count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
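To make the build_lower_tier_mask logic above concrete, here is a small standalone sketch. The tier layout, MAX_TIERS, and tier_nodes are hypothetical, plain unsigned long bitmasks stand in for nodemask_t, and the N_MEMORY filtering step is omitted; tiers are ordered highest first, as in the memory_tiers list:

```c
#include <stdio.h>

#define MAX_TIERS 3

int main(void)
{
	/*
	 * Hypothetical layout: tier 0 (highest, e.g. DRAM) = nodes {0,1},
	 * tier 1 (e.g. slow memory) = nodes {2,3}, tier 2 empty.
	 */
	unsigned long tier_nodes[MAX_TIERS] = { 0x3, 0xc, 0x0 };
	unsigned long lower_tier = 0;
	unsigned long lower_tier_mask[MAX_TIERS];
	int i;

	/* The nodes_or() step: union of every tier's nodes. */
	for (i = 0; i < MAX_TIERS; i++)
		lower_tier |= tier_nodes[i];

	/*
	 * The nodes_andnot() step: strip each tier as we walk down, so
	 * each tier's mask keeps only the nodes of strictly lower tiers.
	 */
	for (i = 0; i < MAX_TIERS; i++) {
		lower_tier &= ~tier_nodes[i];
		lower_tier_mask[i] = lower_tier;
	}

	for (i = 0; i < MAX_TIERS; i++)
		printf("tier %d: lower_tier_mask = 0x%lx\n",
		       i, lower_tier_mask[i]);
	return 0;
}
```

With this layout the top tier ends up with lower_tier_mask = 0xc (nodes 2 and 3) and every other tier with 0, so the demotion fallback in alloc_demote_page() can never allocate from the same or a higher tier.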