
[v1,03/11] mm/page_alloc: refactor memmap_init_zone_device() page init

Message ID 20210325230938.30752-4-joao.m.martins@oracle.com (mailing list archive)
State New, archived
Series mm, sparse-vmemmap: Introduce compound pagemaps

Commit Message

Joao Martins March 25, 2021, 11:09 p.m. UTC
Move struct page init to a helper function __init_zone_device_page().

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 mm/page_alloc.c | 74 +++++++++++++++++++++++++++----------------------
 1 file changed, 41 insertions(+), 33 deletions(-)
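
For context, the point of carving out a per-page helper is that later
patches in this series can reuse it to initialize the pages backing a
compound pagemap one at a time, instead of duplicating the open-coded
loop body. A minimal sketch of such a caller, assuming a hypothetical
memmap_init_compound() that is not part of this patch:

static void __ref memmap_init_compound(unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
{
	unsigned long pfn, end_pfn = head_pfn + nr_pages;

	/*
	 * Hypothetical reuse: apply the same per-page setup to every
	 * page backing one compound pagemap.
	 */
	for (pfn = head_pfn; pfn < end_pfn; pfn++)
		__init_zone_device_page(pfn_to_page(pfn), pfn, zone_idx,
					nid, pgmap);
}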

Comments

Dan Williams April 24, 2021, 12:18 a.m. UTC | #1
On Thu, Mar 25, 2021 at 4:10 PM Joao Martins <joao.m.martins@oracle.com> wrote:
>
> Move struct page init to a helper function __init_zone_device_page().

Same sentence-addition suggestion as on the last patch, to give this
patch some rationale for existing.

Joao Martins April 24, 2021, 7:05 p.m. UTC | #2
On 4/24/21 1:18 AM, Dan Williams wrote:
> On Thu, Mar 25, 2021 at 4:10 PM Joao Martins <joao.m.martins@oracle.com> wrote:
>>
>> Move struct page init to a helper function __init_zone_device_page().
> 
> Same sentence-addition suggestion as on the last patch, to give this
> patch some rationale for existing.
> 
I have fixed this too, with the same message as the previous patch.

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 43dd98446b0b..58974067bbd4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6237,6 +6237,46 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
 }
 
 #ifdef CONFIG_ZONE_DEVICE
+static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
+					  unsigned long zone_idx, int nid,
+					  struct dev_pagemap *pgmap)
+{
+
+	__init_single_page(page, pfn, zone_idx, nid);
+
+	/*
+	 * Mark page reserved as it will need to wait for onlining
+	 * phase for it to be fully associated with a zone.
+	 *
+	 * We can use the non-atomic __set_bit operation for setting
+	 * the flag as we are still initializing the pages.
+	 */
+	__SetPageReserved(page);
+
+	/*
+	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
+	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
+	 * ever freed or placed on a driver-private list.
+	 */
+	page->pgmap = pgmap;
+	page->zone_device_data = NULL;
+
+	/*
+	 * Mark the block movable so that blocks are reserved for
+	 * movable at startup. This will force kernel allocations
+	 * to reserve their blocks rather than leaking throughout
+	 * the address space during boot when many long-lived
+	 * kernel allocations are made.
+	 *
+	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
+	 * because this is done early in section_activate()
+	 */
+	if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		cond_resched();
+	}
+}
+
 void __ref memmap_init_zone_device(struct zone *zone,
 				   unsigned long start_pfn,
 				   unsigned long nr_pages,
@@ -6265,39 +6305,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		struct page *page = pfn_to_page(pfn);
 
-		__init_single_page(page, pfn, zone_idx, nid);
-
-		/*
-		 * Mark page reserved as it will need to wait for onlining
-		 * phase for it to be fully associated with a zone.
-		 *
-		 * We can use the non-atomic __set_bit operation for setting
-		 * the flag as we are still initializing the pages.
-		 */
-		__SetPageReserved(page);
-
-		/*
-		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
-		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
-		 * ever freed or placed on a driver-private list.
-		 */
-		page->pgmap = pgmap;
-		page->zone_device_data = NULL;
-
-		/*
-		 * Mark the block movable so that blocks are reserved for
-		 * movable at startup. This will force kernel allocations
-		 * to reserve their blocks rather than leaking throughout
-		 * the address space during boot when many long-lived
-		 * kernel allocations are made.
-		 *
-		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
-		 * because this is done early in section_activate()
-		 */
-		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-			cond_resched();
-		}
+		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
 	}
 
 	pr_info("%s initialised %lu pages in %ums\n", __func__,
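
As background for the "union ->lru with a ->pgmap back pointer" comment
above, a simplified and abridged view of the relevant struct page
overlay (see include/linux/mm_types.h for the full definition; union
members unrelated to ZONE_DEVICE are elided):

struct page {
	unsigned long flags;
	union {
		struct {	/* Page cache and anonymous pages */
			struct list_head lru;
			struct address_space *mapping;
			pgoff_t index;
			unsigned long private;
		};
		struct {	/* ZONE_DEVICE pages */
			/*
			 * pgmap overlays lru.next and zone_device_data
			 * overlays lru.prev, which is why these pages
			 * must never be freed to the page allocator or
			 * placed on a driver-private list.
			 */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
		};
		/* ... other union members elided ... */
	};
	/* ... remaining fields elided ... */
};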