| Message ID | 20240729091717.464-1-shivankg@amd.com (mailing list archive) |
|---|---|
| State | New |
| Series | mm: improve code consistency with zonelist_* helper functions |
On 29.07.24 11:17, Shivank Garg wrote:
> From: Wei Yang <richard.weiyang@gmail.com>
>
> Replace direct access to zoneref->zone, zoneref->zone_idx, or
> zone_to_nid(zoneref->zone) with the corresponding zonelist_*
> helper functions for consistency.
>
> No functional change.
>
> Co-developed-by: Shivank Garg <shivankg@amd.com>
> Signed-off-by: Shivank Garg <shivankg@amd.com>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>
> CC: Mike Rapoport (IBM) <rppt@kernel.org>
> CC: David Hildenbrand <david@redhat.com>
> ---
>
> Hi Andrew,
>
> I've rebased the patch on top of 6.11-rc1.
>
> Thanks,
> Shivank

Acked-by: David Hildenbrand <david@redhat.com>
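For context on why this is a pure consistency cleanup: the zonelist_* accessors are one-line wrappers over the same zoneref fields the patch stops touching directly, so the conversion cannot change behavior. A sketch of their definitions, paraphrased from include/linux/mmzone.h (exact bodies may vary slightly across kernel versions):

```c
/* Sketch of the accessors, paraphrased from include/linux/mmzone.h. */

/* Return the zone a zoneref entry points at (NULL past the end of a zonelist). */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

/* Return the zone index of the entry, cached in the zoneref itself. */
static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

/* Return the NUMA node id of the entry's zone. */
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}
```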
On Mon, Jul 29, 2024 at 02:47:17PM +0530, Shivank Garg wrote:
>From: Wei Yang <richard.weiyang@gmail.com>
>
>Replace direct access to zoneref->zone, zoneref->zone_idx, or
>zone_to_nid(zoneref->zone) with the corresponding zonelist_*
>helper functions for consistency.
>
>No functional change.
>
>Co-developed-by: Shivank Garg <shivankg@amd.com>
>Signed-off-by: Shivank Garg <shivankg@amd.com>
>Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>
>CC: Mike Rapoport (IBM) <rppt@kernel.org>
>CC: David Hildenbrand <david@redhat.com>
>---
>
>Hi Andrew,
>
>I've rebased the patch on top of 6.11-rc1.

Thanks, looks good to me.

>
>Thanks,
>Shivank
>
> include/linux/mmzone.h     |  4 ++--
> include/trace/events/oom.h |  4 ++--
> mm/mempolicy.c             |  4 ++--
> mm/mmzone.c                |  2 +-
> mm/page_alloc.c            | 22 +++++++++++-----------
> 5 files changed, 18 insertions(+), 18 deletions(-)
>
>[patch diff quoted in full; identical to the patch below]
```diff
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41458892bc8a..9f389c76581f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 		zone = zonelist_zone(z))
 
 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
-	for (zone = z->zone;						\
+	for (zone = zonelist_zone(z);					\
 		zone;							\
 		z = next_zones_zonelist(++z, highidx, nodemask),	\
 			zone = zonelist_zone(z))
@@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
 	nid = first_node(*nodes);
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
-	return (!z->zone) ? true : false;
+	return (!zonelist_zone(z)) ? true : false;
 }
 
 
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zoneref->zone);
-		__entry->zone_idx = zoneref->zone_idx;
+		__entry->node = zonelist_node_idx(zoneref);
+		__entry->zone_idx = zonelist_zone_idx(zoneref);
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
 		__entry->available = available;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b858e22b259d..b3b5f376471f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return z->zone ? zone_to_nid(z->zone) : node;
+		return zonelist_zone(z) ? zonelist_node_idx(z) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2809,7 +2809,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(z->zone);
+		polnid = zonelist_node_idx(z);
 		break;
 
 	default:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index c01896eca736..f9baa8882fbf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
 		z++;
 	else
 		while (zonelist_zone_idx(z) > highest_zoneidx ||
-				(z->zone && !zref_in_nodemask(z, nodes)))
+				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
 			z++;
 
 	return z;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 28f80daf5c04..94e3aa1e145d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3353,7 +3353,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 		if (no_fallback && nr_online_nodes > 1 &&
-		    zone != ac->preferred_zoneref->zone) {
+		    zone != zonelist_zone(ac->preferred_zoneref)) {
 			int local_nid;
 
 			/*
@@ -3361,7 +3361,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			 * fragmenting fallbacks. Locality is more important
 			 * than fragmentation avoidance.
 			 */
-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+			local_nid = zonelist_node_idx(ac->preferred_zoneref);
 			if (zone_to_nid(zone) != local_nid) {
 				alloc_flags &= ~ALLOC_NOFRAGMENT;
 				goto retry;
@@ -3414,7 +3414,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				goto try_this_zone;
 
 			if (!node_reclaim_enabled() ||
-			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
 				continue;
 
 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3436,7 +3436,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 try_this_zone:
-		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -4207,7 +4207,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
+	if (!zonelist_zone(ac->preferred_zoneref))
 		goto nopage;
 
 	/*
@@ -4219,7 +4219,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx,
 					&cpuset_current_mems_allowed);
-		if (!z->zone)
+		if (!zonelist_zone(z))
 			goto nopage;
 	}
 
@@ -4576,8 +4576,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
 			goto failed;
 		}
 
@@ -4636,7 +4636,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
 
 out:
 	return nr_populated;
@@ -4694,7 +4694,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5299,7 +5299,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(z->zone);
+	return zonelist_node_idx(z);
 }
 #endif
```
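To illustrate the call-site pattern the patch establishes, here is a minimal sketch modeled on the mempolicy_slab_node() hunk above. The function itself is hypothetical; only first_zones_zonelist() and the zonelist_* accessors are real kernel APIs:

```c
/* Hypothetical caller for illustration; mirrors the mempolicy_slab_node()
 * conversion in the patch. Only the zonelist helpers are real kernel APIs. */
static int sketch_pick_node(struct zonelist *zonelist,
			    enum zone_type highest_zoneidx,
			    nodemask_t *nodes, int fallback_nid)
{
	/* Find the first usable zoneref at or below highest_zoneidx. */
	struct zoneref *z = first_zones_zonelist(zonelist, highest_zoneidx, nodes);

	/* Before the patch this would read:
	 *   return z->zone ? zone_to_nid(z->zone) : fallback_nid;
	 * After, the zoneref internals stay behind the accessors: */
	return zonelist_zone(z) ? zonelist_node_idx(z) : fallback_nid;
}
```

The payoff of routing every caller through the accessors is that the zoneref layout could change later without touching dozens of call sites across mm/.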