[v5,1/6] mm: Adjust shuffle code to allow for future coalescing

Message ID 20190812213324.22097.30886.stgit@localhost.localdomain
State New, archived
Series mm / virtio: Provide support for unused page reporting

Commit Message

Alexander Duyck Aug. 12, 2019, 9:33 p.m. UTC
From: Alexander Duyck <alexander.h.duyck@linux.intel.com>

This patch is meant to move the head/tail adding logic out of the shuffle
code and into the __free_one_page function since ultimately that is where
it is really needed anyway. By doing this we should be able to reduce the
overhead and can consolidate all of the list addition bits in one spot.

Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
---
 include/linux/mmzone.h |   12 --------
 mm/page_alloc.c        |   70 +++++++++++++++++++++++++++---------------------
 mm/shuffle.c           |   24 ----------------
 mm/shuffle.h           |   32 ++++++++++++++++++++++
 4 files changed, 71 insertions(+), 67 deletions(-)

Comments

Dan Williams Aug. 12, 2019, 10:24 p.m. UTC | #1
On Mon, Aug 12, 2019 at 2:33 PM Alexander Duyck
<alexander.duyck@gmail.com> wrote:
>
> From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
>
> This patch is meant to move the head/tail adding logic out of the shuffle

s/This patch is meant to move/Move/

> code and into the __free_one_page function since ultimately that is where
> it is really needed anyway. By doing this we should be able to reduce the
> overhead

Is the overhead benefit observable? I would expect the overhead of
get_random_u64() dominates.

> and can consolidate all of the list addition bits in one spot.

This sounds like the better argument.

[..]
> diff --git a/mm/shuffle.h b/mm/shuffle.h
> index 777a257a0d2f..add763cc0995 100644
> --- a/mm/shuffle.h
> +++ b/mm/shuffle.h
> @@ -3,6 +3,7 @@
>  #ifndef _MM_SHUFFLE_H
>  #define _MM_SHUFFLE_H
>  #include <linux/jump_label.h>
> +#include <linux/random.h>
>
>  /*
>   * SHUFFLE_ENABLE is called from the command line enabling path, or by
> @@ -43,6 +44,32 @@ static inline bool is_shuffle_order(int order)
>                 return false;
>         return order >= SHUFFLE_ORDER;
>  }
> +
> +static inline bool shuffle_add_to_tail(void)
> +{
> +       static u64 rand;
> +       static u8 rand_bits;
> +       u64 rand_old;
> +
> +       /*
> +        * The lack of locking is deliberate. If 2 threads race to
> +        * update the rand state it just adds to the entropy.
> +        */
> +       if (rand_bits-- == 0) {
> +               rand_bits = 64;
> +               rand = get_random_u64();
> +       }
> +
> +       /*
> +        * Test highest order bit while shifting our random value. This
> +        * should result in us testing for the carry flag following the
> +        * shift.
> +        */
> +       rand_old = rand;
> +       rand <<= 1;
> +
> +       return rand < rand_old;
> +}

This function seems too involved to be a static inline, and I believe
each compilation unit that might call this routine gets its own copy
of 'rand' and 'rand_bits' when the original expectation is that they
are global. How about leaving this bit in mm/shuffle.c and renaming it
coin_flip(), or something more generic, since it does not
'add_to_tail'? The 'add_to_tail' action is something the caller
decides.
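
For illustration, one possible shape for that in mm/shuffle.c (just a
sketch; the coin_flip() name and exact body are a suggestion, not code
from this series, and mm/shuffle.c would keep its
#include <linux/random.h>):

bool coin_flip(void)
{
	static u64 rand;
	static u8 rand_bits;
	bool ret;

	/*
	 * Lockless on purpose, as in the original: a racing update
	 * only perturbs the bit stream, it does not break anything.
	 */
	if (rand_bits == 0) {
		rand_bits = 64;
		rand = get_random_u64();
	}

	ret = rand & 1;
	rand_bits--;
	rand >>= 1;

	return ret;
}

That way there is exactly one copy of 'rand' and 'rand_bits', and the
caller in __free_one_page decides what a true result means.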

Alexander Duyck Aug. 12, 2019, 10:49 p.m. UTC | #2
On Mon, Aug 12, 2019 at 3:24 PM Dan Williams <dan.j.williams@intel.com> wrote:
>
> On Mon, Aug 12, 2019 at 2:33 PM Alexander Duyck
> <alexander.duyck@gmail.com> wrote:
> >
> > From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
> >
> > This patch is meant to move the head/tail adding logic out of the shuffle
>
> s/This patch is meant to move/Move/

I'll update that in the next submission.

> > code and into the __free_one_page function since ultimately that is where
> > it is really needed anyway. By doing this we should be able to reduce the
> > overhead
>
> Is the overhead benefit observable? I would expect the overhead of
> get_random_u64() dominates.
>
> > and can consolidate all of the list addition bits in one spot.
>
> This sounds like the better argument.

Actually the overhead is the bit where we have to set up the arguments
and call the function. There is only one spot where this function is
ever called, and that is in __free_one_page.

> [..]
> > diff --git a/mm/shuffle.h b/mm/shuffle.h
> > index 777a257a0d2f..add763cc0995 100644
> > --- a/mm/shuffle.h
> > +++ b/mm/shuffle.h
> > @@ -3,6 +3,7 @@
> >  #ifndef _MM_SHUFFLE_H
> >  #define _MM_SHUFFLE_H
> >  #include <linux/jump_label.h>
> > +#include <linux/random.h>
> >
> >  /*
> >   * SHUFFLE_ENABLE is called from the command line enabling path, or by
> > @@ -43,6 +44,32 @@ static inline bool is_shuffle_order(int order)
> >                 return false;
> >         return order >= SHUFFLE_ORDER;
> >  }
> > +
> > +static inline bool shuffle_add_to_tail(void)
> > +{
> > +       static u64 rand;
> > +       static u8 rand_bits;
> > +       u64 rand_old;
> > +
> > +       /*
> > +        * The lack of locking is deliberate. If 2 threads race to
> > +        * update the rand state it just adds to the entropy.
> > +        */
> > +       if (rand_bits-- == 0) {
> > +               rand_bits = 64;
> > +               rand = get_random_u64();
> > +       }
> > +
> > +       /*
> > +        * Test highest order bit while shifting our random value. This
> > +        * should result in us testing for the carry flag following the
> > +        * shift.
> > +        */
> > +       rand_old = rand;
> > +       rand <<= 1;
> > +
> > +       return rand < rand_old;
> > +}
>
> This function seems too involved to be a static inline, and I believe
> each compilation unit that might call this routine gets its own copy
> of 'rand' and 'rand_bits' when the original expectation is that they
> are global. How about leaving this bit in mm/shuffle.c and renaming it
> coin_flip(), or something more generic, since it does not
> 'add_to_tail'? The 'add_to_tail' action is something the caller
> decides.

The thing is, there is only one caller of this function, and that is
__free_one_page. That is why I made it a static inline, since that way
we can avoid having to call it as a function at all and can just
inline the code into __free_one_page.

As far as making this more generic, I guess I can look into that. Maybe
I will look at implementing something like get_random_bool() and then
just doing a macro to point to that. One other thing that occurs to me
now that I am looking over the code is that I am not sure the original
or this modified version actually provides all that much randomness if
multiple threads have access to it at the same time. If rand_bits races
past 0 you can end up getting streaks of 0s for 256+ bits.
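
Roughly what I have in mind for get_random_bool() (hypothetical name,
untested sketch): snapshot the shared state once per call, so a racing
refill can no longer leave the counter wrapped around and the value
shifted down to a long run of zeros:

static inline bool get_random_bool(void)
{
	static u64 rand;
	static u8 rand_bits;
	u64 r = rand;
	u8 bits = rand_bits;
	bool ret;

	/* Refill based on the local snapshot, not the shared counter. */
	if (bits == 0) {
		bits = 64;
		r = get_random_u64();
	}

	ret = r & 1;

	/*
	 * Racy write-back is still fine: worst case two CPUs hand out
	 * the same bit, but rand_bits stays in [0, 63] and can never
	 * wrap past zero.
	 */
	rand_bits = bits - 1;
	rand = r >> 1;

	return ret;
}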

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index aa0dd8ca36c8..c6bd8e9bb476 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -116,18 +116,6 @@  static inline void add_to_free_area_tail(struct page *page, struct free_area *ar
 	area->nr_free++;
 }
 
-#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
-/* Used to preserve page allocation order entropy */
-void add_to_free_area_random(struct page *page, struct free_area *area,
-		int migratetype);
-#else
-static inline void add_to_free_area_random(struct page *page,
-		struct free_area *area, int migratetype)
-{
-	add_to_free_area(page, area, migratetype);
-}
-#endif
-
 /* Used for pages which are on another list */
 static inline void move_to_free_area(struct page *page, struct free_area *area,
 			     int migratetype)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index af29c05e23aa..e3cb6e7aa296 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -877,6 +877,36 @@  static inline struct capture_control *task_capc(struct zone *zone)
 #endif /* CONFIG_COMPACTION */
 
 /*
+ * If this is not the largest possible page, check if the buddy
+ * of the next-highest order is free. If it is, it's possible
+ * that pages are being freed that will coalesce soon. In case,
+ * that is happening, add the free page to the tail of the list
+ * so it's less likely to be used soon and more likely to be merged
+ * as a higher order page
+ */
+static inline bool
+buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
+		   struct page *page, unsigned int order)
+{
+	struct page *higher_page, *higher_buddy;
+	unsigned long combined_pfn;
+
+	if (order >= MAX_ORDER - 2)
+		return false;
+
+	if (!pfn_valid_within(buddy_pfn))
+		return false;
+
+	combined_pfn = buddy_pfn & pfn;
+	higher_page = page + (combined_pfn - pfn);
+	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
+	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
+
+	return pfn_valid_within(buddy_pfn) &&
+	       page_is_buddy(higher_page, higher_buddy, order + 1);
+}
+
+/*
  * Freeing function for a buddy system allocator.
  *
  * The concept of a buddy system is to maintain direct-mapped table
@@ -905,11 +935,12 @@  static inline void __free_one_page(struct page *page,
 		struct zone *zone, unsigned int order,
 		int migratetype)
 {
-	unsigned long combined_pfn;
+	struct capture_control *capc = task_capc(zone);
 	unsigned long uninitialized_var(buddy_pfn);
-	struct page *buddy;
+	unsigned long combined_pfn;
+	struct free_area *area;
 	unsigned int max_order;
-	struct capture_control *capc = task_capc(zone);
+	struct page *buddy;
 
 	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 
@@ -978,35 +1009,12 @@  static inline void __free_one_page(struct page *page,
 done_merging:
 	set_page_order(page, order);
 
-	/*
-	 * If this is not the largest possible page, check if the buddy
-	 * of the next-highest order is free. If it is, it's possible
-	 * that pages are being freed that will coalesce soon. In case,
-	 * that is happening, add the free page to the tail of the list
-	 * so it's less likely to be used soon and more likely to be merged
-	 * as a higher order page
-	 */
-	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
-			&& !is_shuffle_order(order)) {
-		struct page *higher_page, *higher_buddy;
-		combined_pfn = buddy_pfn & pfn;
-		higher_page = page + (combined_pfn - pfn);
-		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
-		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
-		if (pfn_valid_within(buddy_pfn) &&
-		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
-			add_to_free_area_tail(page, &zone->free_area[order],
-					      migratetype);
-			return;
-		}
-	}
-
-	if (is_shuffle_order(order))
-		add_to_free_area_random(page, &zone->free_area[order],
-				migratetype);
+	area = &zone->free_area[order];
+	if (is_shuffle_order(order) ? shuffle_add_to_tail() :
+	    buddy_merge_likely(pfn, buddy_pfn, page, order))
+		add_to_free_area_tail(page, area, migratetype);
 	else
-		add_to_free_area(page, &zone->free_area[order], migratetype);
-
+		add_to_free_area(page, area, migratetype);
 }
 
 /*
diff --git a/mm/shuffle.c b/mm/shuffle.c
index 3ce12481b1dc..55d592e62526 100644
--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ -4,7 +4,6 @@ 
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/mmzone.h>
-#include <linux/random.h>
 #include <linux/moduleparam.h>
 #include "internal.h"
 #include "shuffle.h"
@@ -182,26 +181,3 @@  void __meminit __shuffle_free_memory(pg_data_t *pgdat)
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
 		shuffle_zone(z);
 }
-
-void add_to_free_area_random(struct page *page, struct free_area *area,
-		int migratetype)
-{
-	static u64 rand;
-	static u8 rand_bits;
-
-	/*
-	 * The lack of locking is deliberate. If 2 threads race to
-	 * update the rand state it just adds to the entropy.
-	 */
-	if (rand_bits == 0) {
-		rand_bits = 64;
-		rand = get_random_u64();
-	}
-
-	if (rand & 1)
-		add_to_free_area(page, area, migratetype);
-	else
-		add_to_free_area_tail(page, area, migratetype);
-	rand_bits--;
-	rand >>= 1;
-}
diff --git a/mm/shuffle.h b/mm/shuffle.h
index 777a257a0d2f..add763cc0995 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -3,6 +3,7 @@ 
 #ifndef _MM_SHUFFLE_H
 #define _MM_SHUFFLE_H
 #include <linux/jump_label.h>
+#include <linux/random.h>
 
 /*
  * SHUFFLE_ENABLE is called from the command line enabling path, or by
@@ -43,6 +44,32 @@  static inline bool is_shuffle_order(int order)
 		return false;
 	return order >= SHUFFLE_ORDER;
 }
+
+static inline bool shuffle_add_to_tail(void)
+{
+	static u64 rand;
+	static u8 rand_bits;
+	u64 rand_old;
+
+	/*
+	 * The lack of locking is deliberate. If 2 threads race to
+	 * update the rand state it just adds to the entropy.
+	 */
+	if (rand_bits-- == 0) {
+		rand_bits = 64;
+		rand = get_random_u64();
+	}
+
+	/*
+	 * Test highest order bit while shifting our random value. This
+	 * should result in us testing for the carry flag following the
+	 * shift.
+	 */
+	rand_old = rand;
+	rand <<= 1;
+
+	return rand < rand_old;
+}
 #else
 static inline void shuffle_free_memory(pg_data_t *pgdat)
 {
@@ -60,5 +87,10 @@  static inline bool is_shuffle_order(int order)
 {
 	return false;
 }
+
+static inline bool shuffle_add_to_tail(void)
+{
+	return false;
+}
 #endif
 #endif /* _MM_SHUFFLE_H */