[RFC,1/4] mm/page_alloc: convert "report" flag of __free_one_page() to a proper flag

Message ID 20200916183411.64756-2-david@redhat.com (mailing list archive)
State Superseded
Series mm: place pages to the freelist tail when onlining and undoing isolation

Commit Message

David Hildenbrand Sept. 16, 2020, 6:34 p.m. UTC
Let's prepare for additional flags and avoid long parameter lists of bools.
Follow-up patches will also make use of the flags in __free_pages_ok(),
however, I wasn't able to come up with a better name for the type - should
be good enough for internal purposes.
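
The type follows the same __bitwise pattern as gfp_t: the annotation is a no-op
for the compiler, but under sparse ("make C=1") fop_t becomes a restricted type,
so accidentally passing a plain integer (or some other flag type) where a fop_t
is expected gets flagged. A minimal sketch of the idea, reusing the definitions
from the patch (fop_example() is only an illustration; __bitwise, __force and
BIT() come from the usual kernel headers):

    typedef int __bitwise fop_t;

    #define FOP_NONE                ((__force fop_t)0)
    #define FOP_SKIP_REPORT_NOTIFY  ((__force fop_t)BIT(0))

    /* Illustrative consumer of the flags, not a function from the patch. */
    static void fop_example(fop_t fop_flags)
    {
            if (!(fop_flags & FOP_SKIP_REPORT_NOTIFY))
                    ; /* notify the page reporting subsystem here */
    }

    /*
     * fop_example(FOP_NONE);                 fine
     * fop_example(FOP_SKIP_REPORT_NOTIFY);   fine
     * fop_example(1);                        sparse: "restricted fop_t" warning
     */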

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Mike Rapoport <rppt@kernel.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/page_alloc.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

Comments

Alexander Duyck Sept. 16, 2020, 9:44 p.m. UTC | #1
On Wed, Sep 16, 2020 at 11:34 AM David Hildenbrand <david@redhat.com> wrote:
>
> Let's prepare for additional flags and avoid long parameter lists of bools.
> Follow-up patches will also make use of the flags in __free_pages_ok(),
> however, I wasn't able to come up with a better name for the type - should
> be good enough for internal purposes.
>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
> Cc: Mel Gorman <mgorman@techsingularity.net>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Dave Hansen <dave.hansen@intel.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
> Cc: Oscar Salvador <osalvador@suse.de>
> Cc: Mike Rapoport <rppt@kernel.org>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  mm/page_alloc.c | 28 ++++++++++++++++++++--------
>  1 file changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 6b699d273d6e..91cefb8157dd 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -77,6 +77,18 @@
>  #include "shuffle.h"
>  #include "page_reporting.h"
>
> +/* Free One Page flags: for internal, non-pcp variants of free_pages(). */
> +typedef int __bitwise fop_t;
> +
> +/* No special request */
> +#define FOP_NONE               ((__force fop_t)0)
> +
> +/*
> + * Skip free page reporting notification after buddy merging (will *not* mark
> + * the page reported, only skip the notification).
> + */
> +#define FOP_SKIP_REPORT_NOTIFY ((__force fop_t)BIT(0))
> +
>  /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
>  static DEFINE_MUTEX(pcp_batch_high_lock);
>  #define MIN_PERCPU_PAGELIST_FRACTION   (8)
> @@ -948,10 +960,9 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
>   * -- nyc
>   */
>
> -static inline void __free_one_page(struct page *page,
> -               unsigned long pfn,
> -               struct zone *zone, unsigned int order,
> -               int migratetype, bool report)
> +static inline void __free_one_page(struct page *page, unsigned long pfn,
> +                                  struct zone *zone, unsigned int order,
> +                                  int migratetype, fop_t fop_flags)
>  {
>         struct capture_control *capc = task_capc(zone);
>         unsigned long buddy_pfn;
> @@ -1038,7 +1049,7 @@ static inline void __free_one_page(struct page *page,
>                 add_to_free_list(page, zone, order, migratetype);
>
>         /* Notify page reporting subsystem of freed page */
> -       if (report)
> +       if (!(fop_flags & FOP_SKIP_REPORT_NOTIFY))
>                 page_reporting_notify_free(order);
>  }
>
> @@ -1368,7 +1379,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
>                 if (unlikely(isolated_pageblocks))
>                         mt = get_pageblock_migratetype(page);
>
> -               __free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
> +               __free_one_page(page, page_to_pfn(page), zone, 0, mt, FOP_NONE);
>                 trace_mm_page_pcpu_drain(page, 0, mt);
>         }
>         spin_unlock(&zone->lock);
> @@ -1384,7 +1395,7 @@ static void free_one_page(struct zone *zone,
>                 is_migrate_isolate(migratetype))) {
>                 migratetype = get_pfnblock_migratetype(page, pfn);
>         }
> -       __free_one_page(page, pfn, zone, order, migratetype, true);
> +       __free_one_page(page, pfn, zone, order, migratetype, FOP_NONE);
>         spin_unlock(&zone->lock);
>  }
>
> @@ -3277,7 +3288,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
>         lockdep_assert_held(&zone->lock);
>
>         /* Return isolated page to tail of freelist. */
> -       __free_one_page(page, page_to_pfn(page), zone, order, mt, false);
> +       __free_one_page(page, page_to_pfn(page), zone, order, mt,
> +                       FOP_SKIP_REPORT_NOTIFY);
>  }
>
>  /*

Seems pretty straightforward. So we are basically flipping the logic
and replacing !report with FOP_SKIP_REPORT_NOTIFY.
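
Concretely, restating the call-site changes from the diff above, the old bool
argument maps onto the new flags like this:

    __free_one_page(page, pfn, zone, order, mt, FOP_NONE);                /* was: ..., true  */
    __free_one_page(page, pfn, zone, order, mt, FOP_SKIP_REPORT_NOTIFY);  /* was: ..., false */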

Reviewed-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Wei Yang Sept. 18, 2020, 1:53 a.m. UTC | #2
On Wed, Sep 16, 2020 at 08:34:08PM +0200, David Hildenbrand wrote:
>Let's prepare for additional flags and avoid long parameter lists of bools.
>Follow-up patches will also make use of the flags in __free_pages_ok(),
>however, I wasn't able to come up with a better name for the type - should
>be good enough for internal purposes.
>
>Cc: Andrew Morton <akpm@linux-foundation.org>
>Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
>Cc: Mel Gorman <mgorman@techsingularity.net>
>Cc: Michal Hocko <mhocko@kernel.org>
>Cc: Dave Hansen <dave.hansen@intel.com>
>Cc: Vlastimil Babka <vbabka@suse.cz>
>Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
>Cc: Oscar Salvador <osalvador@suse.de>
>Cc: Mike Rapoport <rppt@kernel.org>
>Signed-off-by: David Hildenbrand <david@redhat.com>
>---
> mm/page_alloc.c | 28 ++++++++++++++++++++--------
> 1 file changed, 20 insertions(+), 8 deletions(-)
>
>diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>index 6b699d273d6e..91cefb8157dd 100644
>--- a/mm/page_alloc.c
>+++ b/mm/page_alloc.c
>@@ -77,6 +77,18 @@
> #include "shuffle.h"
> #include "page_reporting.h"
> 
>+/* Free One Page flags: for internal, non-pcp variants of free_pages(). */
>+typedef int __bitwise fop_t;
>+
>+/* No special request */
>+#define FOP_NONE		((__force fop_t)0)
>+
>+/*
>+ * Skip free page reporting notification after buddy merging (will *not* mark

__free_one_page() may not merge with a buddy when the buddy is not available.

Would this comment be a little confusing?

>+ * the page reported, only skip the notification).
>+ */
>+#define FOP_SKIP_REPORT_NOTIFY	((__force fop_t)BIT(0))
>+
> /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
> static DEFINE_MUTEX(pcp_batch_high_lock);
> #define MIN_PERCPU_PAGELIST_FRACTION	(8)
>@@ -948,10 +960,9 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
>  * -- nyc
>  */
> 
>-static inline void __free_one_page(struct page *page,
>-		unsigned long pfn,
>-		struct zone *zone, unsigned int order,
>-		int migratetype, bool report)
>+static inline void __free_one_page(struct page *page, unsigned long pfn,
>+				   struct zone *zone, unsigned int order,
>+				   int migratetype, fop_t fop_flags)
> {
> 	struct capture_control *capc = task_capc(zone);
> 	unsigned long buddy_pfn;
>@@ -1038,7 +1049,7 @@ static inline void __free_one_page(struct page *page,
> 		add_to_free_list(page, zone, order, migratetype);
> 
> 	/* Notify page reporting subsystem of freed page */
>-	if (report)
>+	if (!(fop_flags & FOP_SKIP_REPORT_NOTIFY))
> 		page_reporting_notify_free(order);
> }
> 
>@@ -1368,7 +1379,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
> 		if (unlikely(isolated_pageblocks))
> 			mt = get_pageblock_migratetype(page);
> 
>-		__free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
>+		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FOP_NONE);
> 		trace_mm_page_pcpu_drain(page, 0, mt);
> 	}
> 	spin_unlock(&zone->lock);
>@@ -1384,7 +1395,7 @@ static void free_one_page(struct zone *zone,
> 		is_migrate_isolate(migratetype))) {
> 		migratetype = get_pfnblock_migratetype(page, pfn);
> 	}
>-	__free_one_page(page, pfn, zone, order, migratetype, true);
>+	__free_one_page(page, pfn, zone, order, migratetype, FOP_NONE);
> 	spin_unlock(&zone->lock);
> }
> 
>@@ -3277,7 +3288,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
> 	lockdep_assert_held(&zone->lock);
> 
> 	/* Return isolated page to tail of freelist. */
>-	__free_one_page(page, page_to_pfn(page), zone, order, mt, false);
>+	__free_one_page(page, page_to_pfn(page), zone, order, mt,
>+			FOP_SKIP_REPORT_NOTIFY);
> }
> 
> /*
>-- 
>2.26.2
David Hildenbrand Sept. 18, 2020, 7:23 a.m. UTC | #3
On 18.09.20 03:53, Wei Yang wrote:
> On Wed, Sep 16, 2020 at 08:34:08PM +0200, David Hildenbrand wrote:
>> Let's prepare for additional flags and avoid long parameter lists of bools.
>> Follow-up patches will also make use of the flags in __free_pages_ok(),
>> however, I wasn't able to come up with a better name for the type - should
>> be good enough for internal purposes.
>>
>> Cc: Andrew Morton <akpm@linux-foundation.org>
>> Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
>> Cc: Mel Gorman <mgorman@techsingularity.net>
>> Cc: Michal Hocko <mhocko@kernel.org>
>> Cc: Dave Hansen <dave.hansen@intel.com>
>> Cc: Vlastimil Babka <vbabka@suse.cz>
>> Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
>> Cc: Oscar Salvador <osalvador@suse.de>
>> Cc: Mike Rapoport <rppt@kernel.org>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>> mm/page_alloc.c | 28 ++++++++++++++++++++--------
>> 1 file changed, 20 insertions(+), 8 deletions(-)
>>
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index 6b699d273d6e..91cefb8157dd 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -77,6 +77,18 @@
>> #include "shuffle.h"
>> #include "page_reporting.h"
>>
>> +/* Free One Page flags: for internal, non-pcp variants of free_pages(). */
>> +typedef int __bitwise fop_t;
>> +
>> +/* No special request */
>> +#define FOP_NONE		((__force fop_t)0)
>> +
>> +/*
>> + * Skip free page reporting notification after buddy merging (will *not* mark
> 
> __free_one_page() may not merge buddy when its buddy is not available.
> 
> Would this comment be a little confusing?
> 

I meant the process rather than whether it actually happens.

"Skip free page reporting notification for the (possibly merged) page."

Thanks!
Vlastimil Babka Sept. 24, 2020, 10:19 a.m. UTC | #4
On 9/16/20 8:34 PM, David Hildenbrand wrote:
> Let's prepare for additional flags and avoid long parameter lists of bools.
> Follow-up patches will also make use of the flags in __free_pages_ok(),
> however, I wasn't able to come up with a better name for the type - should
> be good enough for internal purposes.
> 
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
> Cc: Mel Gorman <mgorman@techsingularity.net>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Dave Hansen <dave.hansen@intel.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
> Cc: Oscar Salvador <osalvador@suse.de>
> Cc: Mike Rapoport <rppt@kernel.org>
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

> ---
>  mm/page_alloc.c | 28 ++++++++++++++++++++--------
>  1 file changed, 20 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 6b699d273d6e..91cefb8157dd 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -77,6 +77,18 @@
>  #include "shuffle.h"
>  #include "page_reporting.h"
>  
> +/* Free One Page flags: for internal, non-pcp variants of free_pages(). */
> +typedef int __bitwise fop_t;
> +
> +/* No special request */
> +#define FOP_NONE		((__force fop_t)0)
> +
> +/*
> + * Skip free page reporting notification after buddy merging (will *not* mark
> + * the page reported, only skip the notification).
> + */
> +#define FOP_SKIP_REPORT_NOTIFY	((__force fop_t)BIT(0))
> +
>  /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
>  static DEFINE_MUTEX(pcp_batch_high_lock);
>  #define MIN_PERCPU_PAGELIST_FRACTION	(8)
> @@ -948,10 +960,9 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
>   * -- nyc
>   */
>  
> -static inline void __free_one_page(struct page *page,
> -		unsigned long pfn,
> -		struct zone *zone, unsigned int order,
> -		int migratetype, bool report)
> +static inline void __free_one_page(struct page *page, unsigned long pfn,
> +				   struct zone *zone, unsigned int order,
> +				   int migratetype, fop_t fop_flags)
>  {
>  	struct capture_control *capc = task_capc(zone);
>  	unsigned long buddy_pfn;
> @@ -1038,7 +1049,7 @@ static inline void __free_one_page(struct page *page,
>  		add_to_free_list(page, zone, order, migratetype);
>  
>  	/* Notify page reporting subsystem of freed page */
> -	if (report)
> +	if (!(fop_flags & FOP_SKIP_REPORT_NOTIFY))
>  		page_reporting_notify_free(order);
>  }
>  
> @@ -1368,7 +1379,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
>  		if (unlikely(isolated_pageblocks))
>  			mt = get_pageblock_migratetype(page);
>  
> -		__free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
> +		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FOP_NONE);
>  		trace_mm_page_pcpu_drain(page, 0, mt);
>  	}
>  	spin_unlock(&zone->lock);
> @@ -1384,7 +1395,7 @@ static void free_one_page(struct zone *zone,
>  		is_migrate_isolate(migratetype))) {
>  		migratetype = get_pfnblock_migratetype(page, pfn);
>  	}
> -	__free_one_page(page, pfn, zone, order, migratetype, true);
> +	__free_one_page(page, pfn, zone, order, migratetype, FOP_NONE);
>  	spin_unlock(&zone->lock);
>  }
>  
> @@ -3277,7 +3288,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
>  	lockdep_assert_held(&zone->lock);
>  
>  	/* Return isolated page to tail of freelist. */
> -	__free_one_page(page, page_to_pfn(page), zone, order, mt, false);
> +	__free_one_page(page, page_to_pfn(page), zone, order, mt,
> +			FOP_SKIP_REPORT_NOTIFY);
>  }
>  
>  /*
>
Oscar Salvador Sept. 25, 2020, 10:34 a.m. UTC | #5
On Wed, Sep 16, 2020 at 08:34:08PM +0200, David Hildenbrand wrote:
> Let's prepare for additional flags and avoid long parameter lists of bools.
> Follow-up patches will also make use of the flags in __free_pages_ok(),
> however, I wasn't able to come up with a better name for the type - should
> be good enough for internal purposes.
> 
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
> Cc: Mel Gorman <mgorman@techsingularity.net>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Dave Hansen <dave.hansen@intel.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
> Cc: Oscar Salvador <osalvador@suse.de>
> Cc: Mike Rapoport <rppt@kernel.org>
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Oscar Salvador <osalvador@suse.de>

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6b699d273d6e..91cefb8157dd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -77,6 +77,18 @@ 
 #include "shuffle.h"
 #include "page_reporting.h"
 
+/* Free One Page flags: for internal, non-pcp variants of free_pages(). */
+typedef int __bitwise fop_t;
+
+/* No special request */
+#define FOP_NONE		((__force fop_t)0)
+
+/*
+ * Skip free page reporting notification after buddy merging (will *not* mark
+ * the page reported, only skip the notification).
+ */
+#define FOP_SKIP_REPORT_NOTIFY	((__force fop_t)BIT(0))
+
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
@@ -948,10 +960,9 @@  buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
  * -- nyc
  */
 
-static inline void __free_one_page(struct page *page,
-		unsigned long pfn,
-		struct zone *zone, unsigned int order,
-		int migratetype, bool report)
+static inline void __free_one_page(struct page *page, unsigned long pfn,
+				   struct zone *zone, unsigned int order,
+				   int migratetype, fop_t fop_flags)
 {
 	struct capture_control *capc = task_capc(zone);
 	unsigned long buddy_pfn;
@@ -1038,7 +1049,7 @@  static inline void __free_one_page(struct page *page,
 		add_to_free_list(page, zone, order, migratetype);
 
 	/* Notify page reporting subsystem of freed page */
-	if (report)
+	if (!(fop_flags & FOP_SKIP_REPORT_NOTIFY))
 		page_reporting_notify_free(order);
 }
 
@@ -1368,7 +1379,7 @@  static void free_pcppages_bulk(struct zone *zone, int count,
 		if (unlikely(isolated_pageblocks))
 			mt = get_pageblock_migratetype(page);
 
-		__free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
+		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FOP_NONE);
 		trace_mm_page_pcpu_drain(page, 0, mt);
 	}
 	spin_unlock(&zone->lock);
@@ -1384,7 +1395,7 @@  static void free_one_page(struct zone *zone,
 		is_migrate_isolate(migratetype))) {
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
-	__free_one_page(page, pfn, zone, order, migratetype, true);
+	__free_one_page(page, pfn, zone, order, migratetype, FOP_NONE);
 	spin_unlock(&zone->lock);
 }
 
@@ -3277,7 +3288,8 @@  void __putback_isolated_page(struct page *page, unsigned int order, int mt)
 	lockdep_assert_held(&zone->lock);
 
 	/* Return isolated page to tail of freelist. */
-	__free_one_page(page, page_to_pfn(page), zone, order, mt, false);
+	__free_one_page(page, page_to_pfn(page), zone, order, mt,
+			FOP_SKIP_REPORT_NOTIFY);
 }
 
 /*