
[18/21] mm/cma: support CMA in Designated Movable Blocks

Message ID: 20220913195508.3511038-19-opendmb@gmail.com
State: New
Series: mm: introduce Designated Movable Blocks

Commit Message

Doug Berger Sept. 13, 2022, 7:55 p.m. UTC
This commit allows the page allocator to handle CMA areas that lie
within Designated Movable Blocks differently.

Specifically, their pageblocks are left with the MIGRATE_MOVABLE
migratetype so that the page allocator can utilize them more
aggressively. It also means that the page allocator should not count
these pages in the nr_free_cma metric it uses for managing
MIGRATE_CMA pageblocks.

Accordingly, these areas are removed from the CmaTotal metric after
initialization to avoid confusion.

Signed-off-by: Doug Berger <opendmb@gmail.com>
---
 include/linux/cma.h | 13 ++++++---
 mm/cma.c            | 55 +++++++++++++++++++++++++-----------
 mm/page_alloc.c     | 69 +++++++++++++++++++++++++++++----------------
 3 files changed, 92 insertions(+), 45 deletions(-)
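
As a usage sketch (not part of this patch; the init hook, base address,
and size below are hypothetical), platform code could place a CMA area
inside a Designated Movable Block through the revised reservation API:

        #include <linux/cma.h>
        #include <linux/numa.h>
        #include <linux/sizes.h>

        static struct cma *example_cma;

        static int __init example_reserve_dmb_cma(void)
        {
                phys_addr_t base = 0x40000000;  /* placeholder physical base */
                phys_addr_t size = SZ_64M;      /* placeholder region size */

                /*
                 * in_dmb=true additionally reserves the range as a
                 * Designated Movable Block, so these pageblocks remain
                 * MIGRATE_MOVABLE instead of becoming MIGRATE_CMA.
                 */
                return __cma_declare_contiguous_nid(base, size, 0, 0, 0,
                                                    true, "example-cma",
                                                    &example_cma,
                                                    NUMA_NO_NODE, true);
        }

Existing callers are unaffected: the cma_declare_contiguous_nid() macro
expands to the same function with in_dmb=false.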

Comments

kernel test robot Sept. 14, 2022, 5:07 p.m. UTC | #1
Hi Doug,

I love your patch! Perhaps something to improve:

[auto build test WARNING on robh/for-next]
[also build test WARNING on linus/master v6.0-rc5]
[cannot apply to akpm-mm/mm-everything next-20220914]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Doug-Berger/mm-introduce-Designated-Movable-Blocks/20220914-040216
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config: i386-randconfig-a002 (https://download.01.org/0day-ci/archive/20220915/202209150009.PoWlLoNu-lkp@intel.com/config)
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/635e919c92ca242c4b900bdfc7e21529e76f2f8e
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Doug-Berger/mm-introduce-Designated-Movable-Blocks/20220914-040216
        git checkout 635e919c92ca242c4b900bdfc7e21529e76f2f8e
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> mm/page_alloc.c:9236:5: warning: no previous prototype for function '_alloc_contig_range' [-Wmissing-prototypes]
   int _alloc_contig_range(unsigned long start, unsigned long end,
       ^
   mm/page_alloc.c:9236:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   int _alloc_contig_range(unsigned long start, unsigned long end,
   ^
   static 
   1 warning generated.
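
Since _alloc_contig_range() is only referenced from within
mm/page_alloc.c in this series, the warning could presumably be
resolved as the note suggests, by giving the helper static linkage
(a sketch of the change, not taken from a posted revision):

        -int _alloc_contig_range(unsigned long start, unsigned long end,
        -                       unsigned int migratetype, gfp_t gfp_mask)
        +static int _alloc_contig_range(unsigned long start, unsigned long end,
        +                              unsigned int migratetype, gfp_t gfp_mask)

Alternatively, a prototype could be added to a header such as
mm/internal.h if the helper is meant to be used elsewhere.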


vim +/_alloc_contig_range +9236 mm/page_alloc.c

  9235	
> 9236	int _alloc_contig_range(unsigned long start, unsigned long end,
  9237				unsigned int migratetype, gfp_t gfp_mask)
  9238	{
  9239		unsigned long outer_start, outer_end;
  9240		int order;
  9241		int ret = 0;
  9242	
  9243		struct compact_control cc = {
  9244			.nr_migratepages = 0,
  9245			.order = -1,
  9246			.zone = page_zone(pfn_to_page(start)),
  9247			.mode = MIGRATE_SYNC,
  9248			.ignore_skip_hint = true,
  9249			.no_set_skip_hint = true,
  9250			.gfp_mask = current_gfp_context(gfp_mask),
  9251			.alloc_contig = true,
  9252		};
  9253		INIT_LIST_HEAD(&cc.migratepages);
  9254	
  9255		/*
  9256		 * What we do here is we mark all pageblocks in range as
  9257		 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
  9258		 * have different sizes, and due to the way page allocator
  9259		 * work, start_isolate_page_range() has special handlings for this.
  9260		 *
  9261		 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
  9262		 * migrate the pages from an unaligned range (ie. pages that
  9263		 * we are interested in). This will put all the pages in
  9264		 * range back to page allocator as MIGRATE_ISOLATE.
  9265		 *
  9266		 * When this is done, we take the pages in range from page
  9267		 * allocator removing them from the buddy system.  This way
  9268		 * page allocator will never consider using them.
  9269		 *
  9270		 * This lets us mark the pageblocks back as
  9271		 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
  9272		 * aligned range but not in the unaligned, original range are
  9273		 * put back to page allocator so that buddy can use them.
  9274		 */
  9275	
  9276		ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
  9277		if (ret)
  9278			goto done;
  9279	
  9280		drain_all_pages(cc.zone);
  9281	
  9282		/*
  9283		 * In case of -EBUSY, we'd like to know which page causes problem.
  9284		 * So, just fall through. test_pages_isolated() has a tracepoint
  9285		 * which will report the busy page.
  9286		 *
  9287		 * It is possible that busy pages could become available before
  9288		 * the call to test_pages_isolated, and the range will actually be
  9289		 * allocated.  So, if we fall through be sure to clear ret so that
  9290		 * -EBUSY is not accidentally used or returned to caller.
  9291		 */
  9292		ret = __alloc_contig_migrate_range(&cc, start, end);
  9293		if (ret && ret != -EBUSY)
  9294			goto done;
  9295		ret = 0;
  9296		sync_hugetlb_dissolve();
  9297	
  9298		/*
  9299		 * Pages from [start, end) are within a pageblock_nr_pages
  9300		 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
  9301		 * more, all pages in [start, end) are free in page allocator.
  9302		 * What we are going to do is to allocate all pages from
  9303		 * [start, end) (that is remove them from page allocator).
  9304		 *
  9305		 * The only problem is that pages at the beginning and at the
  9306		 * end of interesting range may be not aligned with pages that
  9307		 * page allocator holds, ie. they can be part of higher order
  9308		 * pages.  Because of this, we reserve the bigger range and
  9309		 * once this is done free the pages we are not interested in.
  9310		 *
  9311		 * We don't have to hold zone->lock here because the pages are
  9312		 * isolated thus they won't get removed from buddy.
  9313		 */
  9314	
  9315		order = 0;
  9316		outer_start = start;
  9317		while (!PageBuddy(pfn_to_page(outer_start))) {
  9318			if (++order >= MAX_ORDER) {
  9319				outer_start = start;
  9320				break;
  9321			}
  9322			outer_start &= ~0UL << order;
  9323		}
  9324	
  9325		if (outer_start != start) {
  9326			order = buddy_order(pfn_to_page(outer_start));
  9327	
  9328			/*
  9329			 * outer_start page could be small order buddy page and
  9330			 * it doesn't include start page. Adjust outer_start
  9331			 * in this case to report failed page properly
  9332			 * on tracepoint in test_pages_isolated()
  9333			 */
  9334			if (outer_start + (1UL << order) <= start)
  9335				outer_start = start;
  9336		}
  9337	
  9338		/* Make sure the range is really isolated. */
  9339		if (test_pages_isolated(outer_start, end, 0)) {
  9340			ret = -EBUSY;
  9341			goto done;
  9342		}
  9343	
  9344		/* Grab isolated pages from freelists. */
  9345		outer_end = isolate_freepages_range(&cc, outer_start, end);
  9346		if (!outer_end) {
  9347			ret = -EBUSY;
  9348			goto done;
  9349		}
  9350	
  9351		/* Free head and tail (if any) */
  9352		if (start != outer_start)
  9353			free_contig_range(outer_start, start - outer_start);
  9354		if (end != outer_end)
  9355			free_contig_range(end, outer_end - end);
  9356	
  9357	done:
  9358		undo_isolate_page_range(start, end, migratetype);
  9359		return ret;
  9360	}
  9361
kernel test robot Sept. 14, 2022, 5:58 p.m. UTC | #2
Hi Doug,

I love your patch! Perhaps something to improve:

[auto build test WARNING on robh/for-next]
[also build test WARNING on linus/master v6.0-rc5]
[cannot apply to akpm-mm/mm-everything next-20220914]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Doug-Berger/mm-introduce-Designated-Movable-Blocks/20220914-040216
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config: i386-randconfig-a001
compiler: gcc-11 (Debian 11.3.0-5) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/635e919c92ca242c4b900bdfc7e21529e76f2f8e
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Doug-Berger/mm-introduce-Designated-Movable-Blocks/20220914-040216
        git checkout 635e919c92ca242c4b900bdfc7e21529e76f2f8e
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> mm/page_alloc.c:9236:5: warning: no previous prototype for '_alloc_contig_range' [-Wmissing-prototypes]
    9236 | int _alloc_contig_range(unsigned long start, unsigned long end,
         |     ^~~~~~~~~~~~~~~~~~~


vim +/_alloc_contig_range +9236 mm/page_alloc.c (the quoted listing is identical to the one in #1 above and is omitted here)
kernel test robot Sept. 14, 2022, 10:03 p.m. UTC | #3
Hi Doug,

I love your patch! Yet something to improve:

[auto build test ERROR on robh/for-next]
[also build test ERROR on linus/master v6.0-rc5]
[cannot apply to akpm-mm/mm-everything next-20220914]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Doug-Berger/mm-introduce-Designated-Movable-Blocks/20220914-040216
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config: x86_64-rhel-8.3 (https://download.01.org/0day-ci/archive/20220915/202209150503.AZYsY64p-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-5) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/635e919c92ca242c4b900bdfc7e21529e76f2f8e
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Doug-Berger/mm-introduce-Designated-Movable-Blocks/20220914-040216
        git checkout 635e919c92ca242c4b900bdfc7e21529e76f2f8e
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   mm/page_alloc.c:9236:5: warning: no previous prototype for '_alloc_contig_range' [-Wmissing-prototypes]
    9236 | int _alloc_contig_range(unsigned long start, unsigned long end,
         |     ^~~~~~~~~~~~~~~~~~~
   mm/page_alloc.c: In function 'alloc_contig_range':
>> mm/page_alloc.c:9390:36: error: 'MIGRATE_CMA' undeclared (first use in this function); did you mean 'MIGRATE_SYNC'?
    9390 |                 if (migratetype == MIGRATE_CMA)
         |                                    ^~~~~~~~~~~
         |                                    MIGRATE_SYNC
   mm/page_alloc.c:9390:36: note: each undeclared identifier is reported only once for each function it appears in


vim +9390 mm/page_alloc.c

  9361	
  9362	/**
  9363	 * alloc_contig_range() -- tries to allocate given range of pages
  9364	 * @start:	start PFN to allocate
  9365	 * @end:	one-past-the-last PFN to allocate
  9366	 * @migratetype:	migratetype of the underlying pageblocks (either
  9367	 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
  9368	 *			in range must have the same migratetype and it must
  9369	 *			be either of the two.
  9370	 * @gfp_mask:	GFP mask to use during compaction
  9371	 *
  9372	 * The PFN range does not have to be pageblock aligned. The PFN range must
  9373	 * belong to a single zone.
  9374	 *
  9375	 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
  9376	 * pageblocks in the range.  Once isolated, the pageblocks should not
  9377	 * be modified by others.
  9378	 *
  9379	 * Return: zero on success or negative error code.  On success all
  9380	 * pages which PFN is in [start, end) are allocated for the caller and
  9381	 * need to be freed with free_contig_range().
  9382	 */
  9383	int alloc_contig_range(unsigned long start, unsigned long end,
  9384			       unsigned int migratetype, gfp_t gfp_mask)
  9385	{
  9386		switch (dmb_intersects(start, end)) {
  9387		case DMB_DISJOINT:
  9388			break;
  9389		case DMB_INTERSECTS:
> 9390			if (migratetype == MIGRATE_CMA)
  9391				migratetype = MIGRATE_MOVABLE;
  9392			else
  9393				return -EBUSY;
  9394			break;
  9395		default:
  9396			return -EBUSY;
  9397		}
  9398	
  9399		return _alloc_contig_range(start, end, migratetype, gfp_mask);
  9400	}
  9401	EXPORT_SYMBOL(alloc_contig_range);
  9402
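
MIGRATE_CMA is only defined in enum migratetype when CONFIG_CMA is
enabled, which is why this CONFIG_CMA=n build breaks. One possible fix
(a sketch, not taken from a posted revision) is to compile the
comparison out in that configuration:

        case DMB_INTERSECTS:
        #ifdef CONFIG_CMA
                if (migratetype == MIGRATE_CMA) {
                        migratetype = MIGRATE_MOVABLE;
                        break;
                }
        #endif
                /* without CONFIG_CMA, no request may overlap a DMB */
                return -EBUSY;

With CONFIG_CMA=n the case then reduces to an unconditional
return -EBUSY, matching the existing else branch, since migratetype
can never be MIGRATE_CMA in that configuration.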

Patch

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 63873b93deaa..ffbb8ea2c5f8 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -31,11 +31,13 @@  extern phys_addr_t cma_get_base(const struct cma *cma);
 extern unsigned long cma_get_size(const struct cma *cma);
 extern const char *cma_get_name(const struct cma *cma);
 
-extern int __init cma_declare_contiguous_nid(phys_addr_t base,
+extern int __init __cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, const char *name, struct cma **res_cma,
-			int nid);
+			int nid, bool in_dmb);
+#define cma_declare_contiguous_nid(b, s, l, a, o, f, n, r_c, nid)	\
+	__cma_declare_contiguous_nid(b, s, l, a, o, f, n, r_c, nid, false)
 static inline int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
@@ -44,10 +46,13 @@  static inline int __init cma_declare_contiguous(phys_addr_t base,
 	return cma_declare_contiguous_nid(base, size, limit, alignment,
 			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
 }
-extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+extern int __cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
 					const char *name,
-					struct cma **res_cma);
+					struct cma **res_cma,
+					bool in_dmb);
+#define cma_init_reserved_mem(base, size, order, name, res_cma)		\
+	__cma_init_reserved_mem(base, size, order, name, res_cma, 0)
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
 			      bool no_warn);
 extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
diff --git a/mm/cma.c b/mm/cma.c
index 6208a3e1cd9d..4f33cd54db9e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,6 +33,7 @@ 
 #include <linux/kmemleak.h>
 #include <linux/page-isolation.h>
 #include <trace/events/cma.h>
+#include <linux/dmb.h>
 
 #include "cma.h"
 
@@ -98,6 +99,10 @@  static void __init cma_activate_area(struct cma *cma)
 {
 	unsigned long base_pfn = cma->base_pfn, pfn;
 	struct zone *zone;
+	int is_dmb = dmb_intersects(base_pfn, base_pfn + cma->count);
+
+	if (is_dmb == DMB_MIXED)
+		goto out_error;
 
 	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
 	if (!cma->bitmap)
@@ -116,13 +121,17 @@  static void __init cma_activate_area(struct cma *cma)
 			goto not_in_zone;
 	}
 
-	for (pfn = base_pfn; pfn < base_pfn + cma->count;
-	     pfn += pageblock_nr_pages) {
-		struct page *page = pfn_to_page(pfn);
+	if (is_dmb == DMB_INTERSECTS) {
+		totalcma_pages -= cma->count;
+	} else {
+		for (pfn = base_pfn; pfn < base_pfn + cma->count;
+		     pfn += pageblock_nr_pages) {
+			struct page *page = pfn_to_page(pfn);
 
-		set_pageblock_migratetype(page, MIGRATE_CMA);
-		init_reserved_pageblock(page);
-		page_zone(page)->cma_pages += pageblock_nr_pages;
+			set_pageblock_migratetype(page, MIGRATE_CMA);
+			init_reserved_pageblock(page);
+			page_zone(page)->cma_pages += pageblock_nr_pages;
+		}
 	}
 
 	spin_lock_init(&cma->lock);
@@ -141,7 +150,8 @@  static void __init cma_activate_area(struct cma *cma)
 	if (!cma->reserve_pages_on_error) {
 		for (pfn = base_pfn; pfn < base_pfn + cma->count;
 		     pfn += pageblock_nr_pages)
-			init_reserved_pageblock(pfn_to_page(pfn));
+			if (!dmb_intersects(pfn, pfn + pageblock_nr_pages))
+				init_reserved_pageblock(pfn_to_page(pfn));
 	}
 	totalcma_pages -= cma->count;
 	cma->count = 0;
@@ -166,7 +176,7 @@  void __init cma_reserve_pages_on_error(struct cma *cma)
 }
 
 /**
- * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+ * __cma_init_reserved_mem() - create custom contiguous area in reserved memory
  * @base: Base address of the reserved area
  * @size: Size of the reserved area (in bytes),
  * @order_per_bit: Order of pages represented by one bit on bitmap.
@@ -174,15 +184,18 @@  void __init cma_reserve_pages_on_error(struct cma *cma)
  *        the area will be set to "cmaN", where N is a running counter of
  *        used areas.
  * @res_cma: Pointer to store the created cma region.
+ * @in_dmb: Designate the reserved memory as a Designated Movable Block.
  *
  * This function creates custom contiguous area from already reserved memory.
  */
-int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
-				 unsigned int order_per_bit,
-				 const char *name,
-				 struct cma **res_cma)
+int __init __cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+				   unsigned int order_per_bit,
+				   const char *name,
+				   struct cma **res_cma,
+				   bool in_dmb)
 {
 	struct cma *cma;
+	int err;
 
 	/* Sanity checks */
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
@@ -201,6 +214,14 @@  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
 		return -EINVAL;
 
+	if (in_dmb) {
+		err = dmb_reserve(base, size, NULL);
+		if (err) {
+			pr_err("Cannot reserve DMB for CMA!\n");
+			return err;
+		}
+	}
+
 	/*
 	 * Each reserved area must be initialised later, when more kernel
 	 * subsystems (like slab allocator) are available.
@@ -223,7 +244,7 @@  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 }
 
 /**
- * cma_declare_contiguous_nid() - reserve custom contiguous area
+ * __cma_declare_contiguous_nid() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
@@ -233,6 +254,7 @@  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * @name: The name of the area. See function cma_init_reserved_mem()
  * @res_cma: Pointer to store the created cma region.
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ * @in_dmb: Designate the reserved memory as a Designated Movable Block.
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
@@ -242,11 +264,11 @@  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
  * reserve in range from @base to @limit.
  */
-int __init cma_declare_contiguous_nid(phys_addr_t base,
+int __init __cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, const char *name, struct cma **res_cma,
-			int nid)
+			int nid, bool in_dmb)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
@@ -374,7 +396,8 @@  int __init cma_declare_contiguous_nid(phys_addr_t base,
 		base = addr;
 	}
 
-	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
+	ret = __cma_init_reserved_mem(base, size, order_per_bit, name, res_cma,
+				      in_dmb);
 	if (ret)
 		goto free_mem;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e38dd1b32771..09d00c178bc8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9233,29 +9233,8 @@  int __alloc_contig_migrate_range(struct compact_control *cc,
 	return 0;
 }
 
-/**
- * alloc_contig_range() -- tries to allocate given range of pages
- * @start:	start PFN to allocate
- * @end:	one-past-the-last PFN to allocate
- * @migratetype:	migratetype of the underlying pageblocks (either
- *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
- *			in range must have the same migratetype and it must
- *			be either of the two.
- * @gfp_mask:	GFP mask to use during compaction
- *
- * The PFN range does not have to be pageblock aligned. The PFN range must
- * belong to a single zone.
- *
- * The first thing this routine does is attempt to MIGRATE_ISOLATE all
- * pageblocks in the range.  Once isolated, the pageblocks should not
- * be modified by others.
- *
- * Return: zero on success or negative error code.  On success all
- * pages which PFN is in [start, end) are allocated for the caller and
- * need to be freed with free_contig_range().
- */
-int alloc_contig_range(unsigned long start, unsigned long end,
-		       unsigned migratetype, gfp_t gfp_mask)
+int _alloc_contig_range(unsigned long start, unsigned long end,
+			unsigned int migratetype, gfp_t gfp_mask)
 {
 	unsigned long outer_start, outer_end;
 	int order;
@@ -9379,6 +9358,46 @@  int alloc_contig_range(unsigned long start, unsigned long end,
 	undo_isolate_page_range(start, end, migratetype);
 	return ret;
 }
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start:	start PFN to allocate
+ * @end:	one-past-the-last PFN to allocate
+ * @migratetype:	migratetype of the underlying pageblocks (either
+ *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
+ *			in range must have the same migratetype and it must
+ *			be either of the two.
+ * @gfp_mask:	GFP mask to use during compaction
+ *
+ * The PFN range does not have to be pageblock aligned. The PFN range must
+ * belong to a single zone.
+ *
+ * The first thing this routine does is attempt to MIGRATE_ISOLATE all
+ * pageblocks in the range.  Once isolated, the pageblocks should not
+ * be modified by others.
+ *
+ * Return: zero on success or negative error code.  On success all
+ * pages which PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+		       unsigned int migratetype, gfp_t gfp_mask)
+{
+	switch (dmb_intersects(start, end)) {
+	case DMB_DISJOINT:
+		break;
+	case DMB_INTERSECTS:
+		if (migratetype == MIGRATE_CMA)
+			migratetype = MIGRATE_MOVABLE;
+		else
+			return -EBUSY;
+		break;
+	default:
+		return -EBUSY;
+	}
+
+	return _alloc_contig_range(start, end, migratetype, gfp_mask);
+}
 EXPORT_SYMBOL(alloc_contig_range);
 
 static int __alloc_contig_pages(unsigned long start_pfn,
@@ -9386,8 +9405,8 @@  static int __alloc_contig_pages(unsigned long start_pfn,
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
 
-	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
-				  gfp_mask);
+	return _alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+				   gfp_mask);
 }
 
 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,