@@ -16,7 +16,7 @@ struct hyp_pool {
* API at EL2.
*/
hyp_spinlock_t lock;
- struct list_head free_area[MAX_ORDER + 1];
+ struct list_head free_area[MIN_MAX_ORDER + 1];
phys_addr_t range_start;
phys_addr_t range_end;
unsigned short max_order;
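
The free lists here are a statically sized array, so the array bound has to be a compile-time constant. Once MAX_ORDER can vary at boot, it can no longer size free_area[]; MIN_MAX_ORDER, the smallest value MAX_ORDER may take, is the compile-time floor that replaces it in such contexts. A minimal userspace sketch of that split, with assumed values rather than kernel code:

#include <stdio.h>

#define MIN_MAX_ORDER 10                   /* assumed compile-time floor */

struct toy_pool {
    int free_count[MIN_MAX_ORDER + 1];     /* one slot per order 0..MIN_MAX_ORDER */
    unsigned short max_order;              /* runtime bound, <= MIN_MAX_ORDER */
};

int main(void)
{
    struct toy_pool pool = { .max_order = 7 };  /* a deliberately small pool */

    for (int i = 0; i <= pool.max_order; i++)   /* mirrors the init loop below */
        pool.free_count[i] = 0;
    printf("orders 0..%u in use, array sized for 0..%d\n",
           pool.max_order, MIN_MAX_ORDER);
    return 0;
}
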
@@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
int i;
hyp_spin_lock_init(&pool->lock);
- pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
+ pool->max_order = min(MIN_MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
for (i = 0; i <= pool->max_order; i++)
INIT_LIST_HEAD(&pool->free_area[i]);
pool->range_start = phys;
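
At init time the runtime bound is then the smaller of the compile-time floor and what the pool actually holds. A worked model of that computation, with get_order() reimplemented for illustration and PAGE_SHIFT of 12 plus the pool size as assumptions:

#include <stdio.h>

#define PAGE_SHIFT    12   /* assumed 4 KiB pages */
#define MIN_MAX_ORDER 10   /* assumed compile-time floor */

/* Smallest order whose block covers `size` bytes, like the kernel's get_order(). */
static int get_order_model(unsigned long size)
{
    unsigned long pages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
    int order = 0;

    while ((1UL << order) < pages)
        order++;
    return order;
}

int main(void)
{
    unsigned int nr_pages = 100;  /* hypothetical pool size */
    int order = get_order_model((unsigned long)(nr_pages + 1) << PAGE_SHIFT);
    int max_order = order < MIN_MAX_ORDER ? order : MIN_MAX_ORDER;

    printf("max_order = %d\n", max_order);  /* 7, since 2^7 = 128 >= 101 */
    return 0;
}
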
@@ -26,10 +26,13 @@
/* Free memory management - zoned buddy allocator. */
#ifdef CONFIG_SET_MAX_ORDER
#define MAX_ORDER CONFIG_SET_MAX_ORDER
+#define MIN_MAX_ORDER CONFIG_SET_MAX_ORDER
#elif CONFIG_ARCH_FORCE_MAX_ORDER != 0
#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
+#define MIN_MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#else
#define MAX_ORDER 10
+#define MIN_MAX_ORDER MAX_ORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
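
The fallback chain keeps MIN_MAX_ORDER equal to MAX_ORDER wherever MAX_ORDER is still fixed, so the change is a no-op until MAX_ORDER becomes a boot-time variable. With the default of 10 and 4 KiB pages, the largest buddy block is 1 << 10 = 1024 pages = 4 MiB. A standalone sketch of the selection, with the config macros supplied on the compiler command line (the value 14 and the file name demo.c are assumed examples):

  cc -DCONFIG_SET_MAX_ORDER=14 demo.c && ./a.out   (prints MAX_ORDER=14)
  cc demo.c && ./a.out                             (prints MAX_ORDER=10)

#include <stdio.h>

#ifdef CONFIG_SET_MAX_ORDER
#define MAX_ORDER CONFIG_SET_MAX_ORDER
#elif defined(CONFIG_ARCH_FORCE_MAX_ORDER) && CONFIG_ARCH_FORCE_MAX_ORDER != 0
#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#else
#define MAX_ORDER 10
#endif

int main(void)
{
    /* With 4 KiB pages, order-10 blocks are 1 << 10 = 1024 pages = 4 MiB. */
    printf("MAX_ORDER=%d, largest block=%d pages\n", MAX_ORDER, 1 << MAX_ORDER);
    return 0;
}
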
@@ -41,7 +41,7 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MIN_MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -51,7 +51,7 @@ extern unsigned int pageblock_order;
 * If huge pages are not used, group by MAX_ORDER_NR_PAGES, or by
 * PAGES_PER_SECTION when MAX_ORDER_NR_PAGES is larger.
*/
-#define pageblock_order (min(PFN_SECTION_SHIFT, MAX_ORDER))
+#define pageblock_order (min(PFN_SECTION_SHIFT, MIN_MAX_ORDER))
#endif /* CONFIG_HUGETLB_PAGE */
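
Both pageblock_order variants clamp to the compile-time floor rather than to MAX_ORDER, so the grouping unit can never outgrow the smallest buddy limit the kernel might run with. A quick check of the huge-page variant; HUGETLB_PAGE_ORDER of 9 (2 MiB huge pages over 4 KiB base pages) is an assumed example value:

#include <stdio.h>

#define HUGETLB_PAGE_ORDER 9    /* assumed: 2 MiB huge pages, 4 KiB base pages */
#define MIN_MAX_ORDER      10   /* assumed compile-time floor */
#define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
    unsigned int pb = min_t(unsigned int, HUGETLB_PAGE_ORDER, MIN_MAX_ORDER);

    printf("pageblock_order = %u\n", pb);  /* 9: the huge page order wins */
    return 0;
}
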
@@ -257,8 +257,8 @@ static inline unsigned int arch_slab_minalign(void)
* to do various tricks to work around compiler limitations in order to
* ensure proper constant folding.
*/
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
- (MAX_ORDER + PAGE_SHIFT) : 25)
+#define KMALLOC_SHIFT_HIGH ((MIN_MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+ (MIN_MAX_ORDER + PAGE_SHIFT) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
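
For SLAB this ties the largest kmalloc cache to the floor instead of a possibly larger boot-time MAX_ORDER. Worked through with assumed values of PAGE_SHIFT 12 and MIN_MAX_ORDER 10: 10 + 12 = 22, which is <= 25, so KMALLOC_SHIFT_HIGH is 22 and the biggest cache is 2^22 bytes = 4 MiB:

#include <stdio.h>

#define PAGE_SHIFT    12   /* assumed 4 KiB pages */
#define MIN_MAX_ORDER 10   /* assumed compile-time floor */
#define KMALLOC_SHIFT_HIGH ((MIN_MAX_ORDER + PAGE_SHIFT) <= 25 ? \
        (MIN_MAX_ORDER + PAGE_SHIFT) : 25)

int main(void)
{
    /* 10 + 12 = 22 <= 25, so the largest kmalloc cache is 2^22 = 4 MiB. */
    printf("KMALLOC_SHIFT_HIGH=%d -> %lu bytes\n",
           KMALLOC_SHIFT_HIGH, 1UL << KMALLOC_SHIFT_HIGH);
    return 0;
}
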
@@ -271,7 +271,7 @@ static inline unsigned int arch_slab_minalign(void)
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MIN_MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -284,7 +284,7 @@ static inline unsigned int arch_slab_minalign(void)
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MIN_MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -466,7 +466,7 @@ static int __init slab_max_order_setup(char *str)
{
get_option(&str, &slab_max_order);
slab_max_order = slab_max_order < 0 ? 0 :
- min(slab_max_order, MAX_ORDER);
+ min(slab_max_order, MIN_MAX_ORDER);
slab_max_order_set = true;
return 1;
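
The slab_max_order= boot parameter gets the same cap, so it can never name an order the compile-time tables cannot represent. A userspace model of the clamp, MIN_MAX_ORDER 10 assumed:

#include <stdio.h>
#include <stdlib.h>

#define MIN_MAX_ORDER 10   /* assumed compile-time floor */
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(int argc, char **argv)
{
    int slab_max_order = argc > 1 ? atoi(argv[1]) : 1;

    /* Negative input falls back to 0; large input is capped at the floor. */
    slab_max_order = slab_max_order < 0 ? 0 :
                min(slab_max_order, MIN_MAX_ORDER);
    printf("effective slab_max_order = %d\n", slab_max_order);
    return 0;
}

Passing 64 prints 10: the parameter no longer tracks a boot-time MAX_ORDER above the floor.
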
@@ -3891,8 +3891,8 @@ static inline int calculate_order(unsigned int size)
/*
* Doh this slab cannot be placed using slub_max_order.
*/
- order = calc_slab_order(size, 1, MAX_ORDER, 1);
- if (order <= MAX_ORDER)
+ order = calc_slab_order(size, 1, MIN_MAX_ORDER, 1);
+ if (order <= MIN_MAX_ORDER)
return order;
return -ENOSYS;
}
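
The last-resort path in calculate_order() likewise searches only up to the floor and returns -ENOSYS when even a MIN_MAX_ORDER slab cannot hold one object. A toy of just that final step; PAGE_SHIFT 12 and MIN_MAX_ORDER 10 are assumptions, and the real calc_slab_order() additionally weighs per-slab waste:

#include <stdio.h>

#define PAGE_SHIFT    12   /* assumed 4 KiB pages */
#define MIN_MAX_ORDER 10   /* assumed compile-time floor */

/* Smallest order whose slab holds one object of `size`, or -1 for -ENOSYS. */
static int last_resort_order(unsigned long size)
{
    for (int order = 0; order <= MIN_MAX_ORDER; order++)
        if ((1UL << (PAGE_SHIFT + order)) >= size)
            return order;
    return -1;   /* object larger than any MIN_MAX_ORDER slab */
}

int main(void)
{
    printf("%d\n", last_resort_order(3UL << 20));  /* 3 MiB fits at order 10 */
    printf("%d\n", last_resort_order(8UL << 20));  /* 8 MiB: -1, i.e. -ENOSYS */
    return 0;
}
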
@@ -4403,7 +4403,7 @@ __setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str)
{
get_option(&str, (int *)&slub_max_order);
- slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);
+ slub_max_order = min_t(unsigned int, slub_max_order, MIN_MAX_ORDER);
return 1;
}
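
As with slab_max_order above, clamping slub_max_order to MIN_MAX_ORDER keeps boot-parameter validation independent of whatever MAX_ORDER the running kernel was booted with: both allocators bound user input by the compile-time floor.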