--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;
unsigned long i;

- pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
if (!pte)
return NULL;

--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -245,7 +245,7 @@ unsigned long iopa(unsigned long addr)
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
if (mem_init_done)
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ return __pte_alloc_one_kernel(mm);
else
return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
MEMBLOCK_LOW_LIMIT,
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -118,7 +118,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;

if (likely(mem_init_done)) {
- pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
} else {
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -851,6 +851,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
+ struct page *page;

pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
@@ -858,7 +859,9 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
/* INVLPG to clear all paging-structure caches */
flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

- free_page((unsigned long)pte);
+ page = virt_to_page(pte);
+ pgtable_page_dec(page);
+ __free_page(page);

return 1;
}
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -18,7 +18,14 @@
*/
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
- return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+ struct page *page;
+ gfp_t gfp = GFP_PGTABLE_KERNEL;
+
+ page = alloc_pages(gfp, 0);
+ if (!page)
+ return NULL;
+ pgtable_page_inc(page);
+ return (pte_t *)page_address(page);
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
@@ -41,7 +48,10 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_page((unsigned long)pte);
+ struct page *page = virt_to_page(pte);
+
+ pgtable_page_dec(page);
+ __free_page(page);
}

/**
The kernel PTE-level page tables are always protected by mm->page_table_lock
rather than by the split pagetable lock, so kernel PTE-level pagetable pages
are currently not accounted. In particular, vmalloc()/vmap() can consume a
lot of kernel page tables. To get accurate pagetable accounting, call the
new helpers pgtable_page_inc()/pgtable_page_dec() when allocating or freeing
a kernel PTE-level pagetable page, and convert architectures to use the
corresponding generic PTE pagetable allocation and freeing functions.

Note this patch only adds accounting for the page tables allocated after
boot.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
---
 arch/csky/include/asm/pgalloc.h |  2 +-
 arch/microblaze/mm/pgtable.c    |  2 +-
 arch/openrisc/mm/ioremap.c      |  2 +-
 arch/x86/mm/pgtable.c           |  5 ++++-
 include/asm-generic/pgalloc.h   | 14 ++++++++++++--
 5 files changed, 19 insertions(+), 6 deletions(-)
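
For reference, the pgtable_page_inc()/pgtable_page_dec() helpers used in the
hunks above are not defined in this patch; they are introduced by an earlier
patch in the series. A minimal sketch of what they are expected to do,
assuming they mirror the NR_PAGETABLE accounting that
pgtable_pte_page_ctor()/pgtable_pte_page_dtor() already perform for user PTE
pages (the exact bodies below are an assumption, not the series' verbatim
code):

/*
 * Sketch only: modeled on the existing user-PTE accounting in
 * pgtable_pte_page_ctor()/pgtable_pte_page_dtor(), minus the split
 * ptlock handling that kernel page tables do not need.
 */
static inline void pgtable_page_inc(struct page *page)
{
	__SetPageTable(page);
	inc_lruvec_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dec(struct page *page)
{
	__ClearPageTable(page);
	dec_lruvec_page_state(page, NR_PAGETABLE);
}

NR_PAGETABLE is what /proc/meminfo reports as "PageTables:", so with this
change kernel PTE pages allocated at runtime are included in that figure.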
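
As a quick sanity check (a hypothetical test, not part of the patch), a
throwaway module that vmalloc()s a large region exercises the new
accounting, since every 2 MiB of freshly populated vmalloc space needs one
new PTE page on x86-64 (4K pages, 512 PTEs per page):

/* Hypothetical demo module: maps 64 MiB through vmalloc(), which
 * requires 16384 PTEs, i.e. roughly 32 newly allocated PTE pages
 * that are now accounted in NR_PAGETABLE.
 */
#include <linux/module.h>
#include <linux/vmalloc.h>

static void *buf;

static int __init pgtable_acct_demo_init(void)
{
	buf = vmalloc(64UL << 20);	/* 64 MiB */
	return buf ? 0 : -ENOMEM;
}

static void __exit pgtable_acct_demo_exit(void)
{
	vfree(buf);
}

module_init(pgtable_acct_demo_init);
module_exit(pgtable_acct_demo_exit);
MODULE_LICENSE("GPL");

Comparing the "PageTables:" line of /proc/meminfo before and after loading
the module should show an increase of roughly 128 kB, assuming the covered
vmalloc range was not already populated with page tables. No matching drop
is expected on unload, since kernel PTE pages backing the vmalloc area are
not torn down on vfree().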