@@ -116,18 +116,6 @@ static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
area->nr_free++;
}
-#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
-/* Used to preserve page allocation order entropy */
-void add_to_free_area_random(struct page *page, struct free_area *area,
- int migratetype);
-#else
-static inline void add_to_free_area_random(struct page *page,
- struct free_area *area, int migratetype)
-{
- add_to_free_area(page, area, migratetype);
-}
-#endif
-
/* Used for pages which are on another list */
static inline void move_to_free_area(struct page *page, struct free_area *area,
int migratetype)
@@ -878,6 +878,36 @@ static inline struct capture_control *task_capc(struct zone *zone)
#endif /* CONFIG_COMPACTION */
/*
+ * If this is not the largest possible page, check if the buddy
+ * of the next-highest order is free. If it is, it's possible
+ * that pages are being freed that will coalesce soon. In case
+ * that is happening, add the free page to the tail of the list
+ * so it's less likely to be used soon and more likely to be merged
+ * as a higher-order page.
+ */
+static inline bool
+buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
+ struct page *page, unsigned int order)
+{
+ struct page *higher_page, *higher_buddy;
+ unsigned long combined_pfn;
+
+ if (order >= MAX_ORDER - 2)
+ return false;
+
+ if (!pfn_valid_within(buddy_pfn))
+ return false;
+
+ combined_pfn = buddy_pfn & pfn;
+ higher_page = page + (combined_pfn - pfn);
+ buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
+ higher_buddy = higher_page + (buddy_pfn - combined_pfn);
+
+ return pfn_valid_within(buddy_pfn) &&
+ page_is_buddy(higher_page, higher_buddy, order + 1);
+}
+
+/*
* Freeing function for a buddy system allocator.
*
* The concept of a buddy system is to maintain direct-mapped table
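The pfn arithmetic in buddy_merge_likely() is compact, so a minimal user-space sketch of just the address math may help. find_buddy_pfn() below is a stand-in built on the same XOR relation as the kernel's __find_buddy_pfn(); the struct page translation and the pfn_valid_within()/page_is_buddy() validity checks are deliberately omitted, and the concrete pfn is only an example:

#include <stdio.h>

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	/* Buddies differ only in the bit that selects which half of
	 * the order+1 block a page sits in. */
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1040;	/* base pfn of an order-2 block */
	unsigned int order = 2;
	unsigned long buddy_pfn = find_buddy_pfn(pfn, order);
	unsigned long combined_pfn = buddy_pfn & pfn;	/* base of the order-3 block */
	unsigned long higher_buddy = find_buddy_pfn(combined_pfn, order + 1);

	printf("pfn=%#lx buddy=%#lx combined=%#lx higher_buddy=%#lx\n",
	       pfn, buddy_pfn, combined_pfn, higher_buddy);
	return 0;
}

For pfn 0x1040 at order 2 this prints buddy 0x1044, combined base 0x1040 and order-3 buddy 0x1048: exactly the chain of addresses the heuristic probes before deciding on tail placement.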
@@ -906,11 +936,12 @@ static inline void __free_one_page(struct page *page,
struct zone *zone, unsigned int order,
int migratetype)
{
- unsigned long combined_pfn;
+ struct capture_control *capc = task_capc(zone);
unsigned long uninitialized_var(buddy_pfn);
- struct page *buddy;
+ unsigned long combined_pfn;
+ struct free_area *area;
unsigned int max_order;
- struct capture_control *capc = task_capc(zone);
+ struct page *buddy;
max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
@@ -979,35 +1010,12 @@ static inline void __free_one_page(struct page *page,
done_merging:
set_page_order(page, order);
- /*
- * If this is not the largest possible page, check if the buddy
- * of the next-highest order is free. If it is, it's possible
- * that pages are being freed that will coalesce soon. In case,
- * that is happening, add the free page to the tail of the list
- * so it's less likely to be used soon and more likely to be merged
- * as a higher order page
- */
- if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
- && !is_shuffle_order(order)) {
- struct page *higher_page, *higher_buddy;
- combined_pfn = buddy_pfn & pfn;
- higher_page = page + (combined_pfn - pfn);
- buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
- higher_buddy = higher_page + (buddy_pfn - combined_pfn);
- if (pfn_valid_within(buddy_pfn) &&
- page_is_buddy(higher_page, higher_buddy, order + 1)) {
- add_to_free_area_tail(page, &zone->free_area[order],
- migratetype);
- return;
- }
- }
-
- if (is_shuffle_order(order))
- add_to_free_area_random(page, &zone->free_area[order],
- migratetype);
+ area = &zone->free_area[order];
+ if (is_shuffle_order(order) ? shuffle_pick_tail() :
+ buddy_merge_likely(pfn, buddy_pfn, page, order))
+ add_to_free_area_tail(page, area, migratetype);
else
- add_to_free_area(page, &zone->free_area[order], migratetype);
-
+ add_to_free_area(page, area, migratetype);
}
/*
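The net effect of the done_merging rework is a single tail-or-head decision per free. A toy model of the new ternary, with the three inputs flattened to plain booleans (the names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* shuffle_order: what is_shuffle_order(order) would return
 * coin_flip:     what shuffle_pick_tail() would return
 * merge_likely:  what buddy_merge_likely() would return
 */
static bool place_at_tail(bool shuffle_order, bool coin_flip, bool merge_likely)
{
	return shuffle_order ? coin_flip : merge_likely;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       place_at_tail(true,  true,  false),	/* 1: random says tail */
	       place_at_tail(true,  false, true),	/* 0: heuristic ignored */
	       place_at_tail(false, false, true),	/* 1: merge likely, tail */
	       place_at_tail(false, true,  false));	/* 0: head placement */
	return 0;
}

The table it prints makes the precedence explicit: shuffle-eligible orders take the random decision and never consult the merge heuristic, so the freeing path keeps exactly the entropy the shuffle code used to inject itself.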
@@ -4,7 +4,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mmzone.h>
-#include <linux/random.h>
#include <linux/moduleparam.h>
#include "internal.h"
#include "shuffle.h"
@@ -190,8 +189,7 @@ struct batched_bit_entropy {
static DEFINE_PER_CPU(struct batched_bit_entropy, batched_entropy_bool);
-void add_to_free_area_random(struct page *page, struct free_area *area,
- int migratetype)
+bool __shuffle_pick_tail(void)
{
struct batched_bit_entropy *batch;
unsigned long entropy;
@@ -213,8 +211,5 @@ void add_to_free_area_random(struct page *page, struct free_area *area,
batch->position = position;
entropy = batch->entropy_bool;
- if (1ul & (entropy >> position))
- add_to_free_area(page, area, migratetype);
- else
- add_to_free_area_tail(page, area, migratetype);
+ return 1ul & (entropy >> position);
}
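The two shuffle.c hunks only show the tail of the batching logic; the refill path falls between them. A single-threaded user-space sketch of the whole idea, assuming (as the elided code suggests) that a 64-bit word is fetched once and a bit position walks down it. rand_u64() is a non-cryptographic stand-in for get_random_u64(), unsigned long is assumed 64-bit as in the kernel code above, and the kernel's per-CPU batch with deliberately racy accesses is not modeled:

#include <stdbool.h>
#include <stdlib.h>

static unsigned long entropy;
static unsigned int position;

/* Stand-in PRNG, not cryptographic: splice three random() calls. */
static unsigned long rand_u64(void)
{
	return ((unsigned long)random() << 42) ^
	       ((unsigned long)random() << 21) ^ (unsigned long)random();
}

static bool pick_tail(void)
{
	if (position == 0) {
		/* Batch exhausted: one 64-bit fetch funds 64 calls. */
		entropy = rand_u64();
		position = 64;
	}
	position--;
	return 1UL & (entropy >> position);
}

int main(void)
{
	int tails = 0;

	for (int i = 0; i < 1000; i++)
		tails += pick_tail();
	return tails > 0 ? 0 : 1;	/* roughly half should be tails */
}

Batching is the point of this structure: __free_one_page() is hot, and one RNG call amortized over 64 head-or-tail decisions is far cheaper than one call per free.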
@@ -3,6 +3,7 @@
#ifndef _MM_SHUFFLE_H
#define _MM_SHUFFLE_H
#include <linux/jump_label.h>
+#include <linux/random.h>
/*
* SHUFFLE_ENABLE is called from the command line enabling path, or by
@@ -22,6 +23,7 @@ enum mm_shuffle_ctl {
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
extern void page_alloc_shuffle(enum mm_shuffle_ctl ctl);
extern void __shuffle_free_memory(pg_data_t *pgdat);
+extern bool __shuffle_pick_tail(void);
static inline void shuffle_free_memory(pg_data_t *pgdat)
{
if (!static_branch_unlikely(&page_alloc_shuffle_key))
@@ -43,6 +45,11 @@ static inline bool is_shuffle_order(int order)
return false;
return order >= SHUFFLE_ORDER;
}
+
+static inline bool shuffle_pick_tail(void)
+{
+ return __shuffle_pick_tail();
+}
#else
static inline void shuffle_free_memory(pg_data_t *pgdat)
{
@@ -60,5 +67,10 @@ static inline bool is_shuffle_order(int order)
{
return false;
}
+
+static inline bool shuffle_pick_tail(void)
+{
+ return false;
+}
#endif
#endif /* _MM_SHUFFLE_H */
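shuffle.h grows matching constant-false stubs for the CONFIG_SHUFFLE_PAGE_ALLOCATOR=n build so that callers need no #ifdefs. A small sketch of why this is free at runtime; the stubs mirror the #else branch above, while the caller is purely illustrative:

#include <stdbool.h>

/* CONFIG off: both helpers are constant-false inlines. */
static inline bool is_shuffle_order(int order) { return false; }
static inline bool shuffle_pick_tail(void) { return false; }

bool place_at_tail(int order, bool merge_likely)
{
	/* With the stubs in scope this expression constant-folds to
	 * merge_likely, so the compiler emits no shuffle code at all. */
	return is_shuffle_order(order) ? shuffle_pick_tail() : merge_likely;
}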