This is the order of the page table allocation, not the order of a PMD.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/parisc/include/asm/pgalloc.h | 6 +++---
 arch/parisc/include/asm/pgtable.h | 4 ++--
 arch/parisc/mm/init.c             | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -48,15 +48,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd;

- pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+ pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_TABLE_ORDER);
if (likely(pmd))
- memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
+ memset ((void *)pmd, 0, PAGE_SIZE << PMD_TABLE_ORDER);
return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- free_pages((unsigned long)pmd, PMD_ORDER);
+ free_pages((unsigned long)pmd, PMD_TABLE_ORDER);
}

#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -112,7 +112,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
-#define PMD_ORDER 1
+#define PMD_TABLE_ORDER 1
#define PGD_ORDER 0
#else
#define PGD_ORDER 1
@@ -131,7 +131,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
+#define BITS_PER_PMD (PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD 0
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -378,8 +378,8 @@ static void __init map_pages(unsigned long start_vaddr,

#if CONFIG_PGTABLE_LEVELS == 3
if (pud_none(*pud)) {
- pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
- PAGE_SIZE << PMD_ORDER);
+ pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
+ PAGE_SIZE << PMD_TABLE_ORDER);
if (!pmd)
panic("pmd allocation failed.\n");
pud_populate(NULL, pud, pmd);
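
For reviewers: a stand-alone sketch (not part of the patch) of the arithmetic
the renamed macro drives. PAGE_SHIFT == 12 and BITS_PER_PMD_ENTRY == 3 are
illustrative assumptions chosen for the example (4 KiB pages, 8-byte PMD
entries), not guaranteed parisc values:

  /*
   * Illustrative sketch only; compile and run as an ordinary
   * userspace program.  All values here are assumptions for the
   * example, except PMD_TABLE_ORDER, which this patch renames.
   */
  #include <stdio.h>

  #define PAGE_SHIFT          12                    /* assumed */
  #define PAGE_SIZE           (1UL << PAGE_SHIFT)
  #define PMD_TABLE_ORDER     1                     /* from this patch */
  #define BITS_PER_PMD_ENTRY  3                     /* assumed */

  /* Same shape as the BITS_PER_PMD definition in pgtable.h above. */
  #define BITS_PER_PMD  (PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
  #define PTRS_PER_PMD  (1UL << BITS_PER_PMD)

  int main(void)
  {
          /* pmd_alloc_one() grabs 2^PMD_TABLE_ORDER contiguous pages. */
          unsigned long table_bytes = PAGE_SIZE << PMD_TABLE_ORDER;

          printf("PMD table allocation: %lu bytes (%lu pages)\n",
                 table_bytes, table_bytes >> PAGE_SHIFT);
          printf("entries per PMD table: %lu\n", PTRS_PER_PMD);
          /* entries * bytes-per-entry covers the whole allocation */
          printf("check: %lu * %u = %lu bytes\n", PTRS_PER_PMD,
                 1U << BITS_PER_PMD_ENTRY,
                 PTRS_PER_PMD << BITS_PER_PMD_ENTRY);
          return 0;
  }

With these assumed values the table is 8 KiB holding 1024 entries: the order
describes the size of the page table allocation itself, whereas PMD_SIZE, the
span of memory mapped by a single PMD entry, is a different quantity entirely
and is untouched by this patch.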