@@ -2293,27 +2293,29 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
hpage_node = first_node(pol->v.preferred_nodes);
nmask = policy_nodemask(gfp, pol);
- if (!nmask || node_isset(hpage_node, *nmask)) {
- mpol_cond_put(pol);
- /*
- * First, try to allocate THP only on local node, but
- * don't reclaim unnecessarily, just compact.
- */
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE | __GFP_NORETRY, order);
+ mpol_cond_put(pol);
- /*
- * If hugepage allocations are configured to always
- * synchronous compact or the vma has been madvised
- * to prefer hugepage backing, retry allowing remote
- * memory with both reclaim and compact as well.
- */
- if (!page && (gfp & __GFP_DIRECT_RECLAIM))
- page = __alloc_pages_node(hpage_node,
- gfp, order);
+ /*
+ * First, try to allocate THP only on local node, but
+ * don't reclaim unnecessarily, just compact.
+ */
+ page = __alloc_pages_nodemask(gfp | __GFP_THISNODE |
+ __GFP_NORETRY,
+ order, hpage_node, nmask);
- goto out;
- }
+ /*
+ * If hugepage allocations are configured to always synchronous
+ * compact or the vma has been madvised to prefer hugepage
+ * backing, retry allowing remote memory with both reclaim and
+ * compact as well.
+ */
+ if (!page && (gfp & __GFP_DIRECT_RECLAIM))
+ page = __alloc_pages_nodemask(gfp, order, hpage_node,
+ nmask);
+
+ VM_BUG_ON(page && nmask &&
+ !node_isset(page_to_nid(page), *nmask));
+ goto out;
}
nmask = policy_nodemask(gfp, pol);
__alloc_pages_nodemask() already does the right thing for a preferred node
and a bind nodemask, so calling it directly allows us to simplify much of
this code. The preferred-node/nodemask handling occurs in
prepare_alloc_pages(). A VM_BUG_ON assertion is added to prove correctness.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
---
 mm/mempolicy.c | 40 +++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)
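
For review purposes, a condensed, illustrative restatement of the THP path
that results from the hunk above. The wrapper name thp_alloc_sketch() is
invented for this sketch only, and the mpol_cond_put() refcount handling
from the real code is omitted; the calls it does make (policy_nodemask(),
__alloc_pages_nodemask(), node_isset(), page_to_nid(), VM_BUG_ON()) mirror
the patch itself and are not new.

#include <linux/gfp.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>

/*
 * Illustrative only: condensed restatement of the new alloc_pages_vma()
 * THP path shown in the hunk above (mempolicy refcounting omitted).
 */
static struct page *thp_alloc_sketch(gfp_t gfp, int order,
				     struct mempolicy *pol, int hpage_node)
{
	nodemask_t *nmask = policy_nodemask(gfp, pol);
	struct page *page;

	/* First attempt: local node only, compact but don't reclaim/retry. */
	page = __alloc_pages_nodemask(gfp | __GFP_THISNODE | __GFP_NORETRY,
				      order, hpage_node, nmask);

	/* Retry allowing remote memory only if direct reclaim is permitted. */
	if (!page && (gfp & __GFP_DIRECT_RECLAIM))
		page = __alloc_pages_nodemask(gfp, order, hpage_node, nmask);

	/* Any page returned must fall within the bind nodemask, if one exists. */
	VM_BUG_ON(page && nmask && !node_isset(page_to_nid(page), *nmask));

	return page;
}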