| Message ID | 1627970362-61305-3-git-send-email-feng.tang@intel.com |
|---|---|
| State | New |
| Series | Introduce multi-preference mempolicy |
On Tue 03-08-21 13:59:19, Feng Tang wrote:
> The semantics of MPOL_PREFERRED_MANY are similar to MPOL_PREFERRED,
> in that it will first try to allocate memory from the preferred
> node(s), and fall back to all nodes in the system when the first try
> fails.
>
> Add a dedicated function alloc_pages_preferred_many() for it, just
> like for the 'interleave' policy, which will be used by 2 general
> memory allocation APIs: alloc_pages() and alloc_pages_vma()
>
> Link: https://lore.kernel.org/r/20200630212517.308045-9-ben.widawsky@intel.com
> Suggested-by: Michal Hocko <mhocko@suse.com>
> Originally-by: Ben Widawsky <ben.widawsky@intel.com>
> Co-developed-by: Ben Widawsky <ben.widawsky@intel.com>
> Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
> Signed-off-by: Feng Tang <feng.tang@intel.com>

Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

> ---
>  mm/mempolicy.c | 30 ++++++++++++++++++++++++++++++
>  1 file changed, 30 insertions(+)
>
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 72f7ff760989..a00bb1c48a15 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2166,6 +2166,27 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
>  	return page;
>  }
>
> +static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
> +						int nid, struct mempolicy *pol)
> +{
> +	struct page *page;
> +	gfp_t preferred_gfp;
> +
> +	/*
> +	 * This is a two pass approach. The first pass will only try the
> +	 * preferred nodes but skip the direct reclaim and allow the
> +	 * allocation to fail, while the second pass will try all the
> +	 * nodes in system.
> +	 */
> +	preferred_gfp = gfp | __GFP_NOWARN;
> +	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
> +	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
> +	if (!page)
> +		page = __alloc_pages(gfp, order, numa_node_id(), NULL);
> +
> +	return page;
> +}
> +
>  /**
>   * alloc_pages_vma - Allocate a page for a VMA.
>   * @gfp: GFP flags.
> @@ -2201,6 +2222,12 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
>  		goto out;
>  	}
>
> +	if (pol->mode == MPOL_PREFERRED_MANY) {
> +		page = alloc_pages_preferred_many(gfp, order, node, pol);
> +		mpol_cond_put(pol);
> +		goto out;
> +	}
> +
>  	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
>  		int hpage_node = node;
>
> @@ -2278,6 +2305,9 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
>  	 */
>  	if (pol->mode == MPOL_INTERLEAVE)
>  		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
> +	else if (pol->mode == MPOL_PREFERRED_MANY)
> +		page = alloc_pages_preferred_many(gfp, order,
> +				numa_node_id(), pol);
>  	else
>  		page = __alloc_pages(gfp, order,
>  				policy_node(gfp, pol, numa_node_id()),
> --
> 2.14.1
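For readers who want to exercise the new mode from userspace, here is a minimal sketch (not part of the patch; it assumes a kernel with this series applied, and it calls the raw syscall since glibc does not wrap set_mempolicy(2)):

```c
/*
 * Hypothetical test program: set a task-wide MPOL_PREFERRED_MANY
 * policy preferring nodes 0 and 2, then fault in some memory.
 * Allocations should come from the preferred nodes when possible
 * and silently fall back to any node otherwise, matching the
 * two-pass logic in alloc_pages_preferred_many() above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef MPOL_PREFERRED_MANY
#define MPOL_PREFERRED_MANY 5	/* uapi value introduced by this series */
#endif

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 2); /* nodes 0, 2 */
	size_t len = 1 << 20;
	char *buf;

	/* maxnode is the number of bits in the mask plus one */
	if (syscall(SYS_set_mempolicy, MPOL_PREFERRED_MANY, &nodemask,
		    8 * sizeof(nodemask) + 1)) {
		perror("set_mempolicy");
		return 1;
	}

	buf = malloc(len);
	memset(buf, 0, len);	/* fault pages in under the new policy */
	free(buf);
	return 0;
}
```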
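Because alloc_pages_vma() also gains the MPOL_PREFERRED_MANY branch, per-VMA policies installed with mbind(2) take the same two-pass path. A hedged fragment illustrating that (map_preferred() is a made-up helper; assumes libnuma's <numaif.h> for the mbind() declaration):

```c
/* Hypothetical per-mapping usage: prefer nodes 0 and 1 for one region. */
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>
#include <numaif.h>	/* mbind() declaration (libnuma headers) */

#ifndef MPOL_PREFERRED_MANY
#define MPOL_PREFERRED_MANY 5	/* see the patch's uapi addition */
#endif

static void *map_preferred(size_t len)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1); /* nodes 0, 1 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return NULL;
	/*
	 * Faults in [p, p + len) now go through alloc_pages_vma() and
	 * hence alloc_pages_preferred_many() on kernels with this patch.
	 */
	if (mbind(p, len, MPOL_PREFERRED_MANY, &nodemask,
		  8 * sizeof(nodemask) + 1, 0))
		perror("mbind");
	return p;
}
```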