Now that all kernel page table manipulations are routed through the
fixmap API if r/o page tables are enabled, we can remove write access
from the linear mapping of those pages.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/pgalloc.h |  6 +++++
 arch/arm64/mm/mmu.c              | 24 +++++++++++++++++++-
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -20,6 +20,9 @@
#define __HAVE_ARCH_PMD_FREE
#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PTE_FREE
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_FREE_KERNEL
+
#include <asm-generic/pgalloc.h>
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
@@ -27,6 +30,9 @@
pgtable_t pte_alloc_one(struct mm_struct *mm);
void pte_free(struct mm_struct *mm, struct page *pte_page);
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr);
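For reference: defining __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL and
__HAVE_ARCH_PTE_FREE_KERNEL makes <asm-generic/pgalloc.h> skip its generic
inline definitions so the arm64 implementations in mmu.c below take over;
only the __pte_alloc_one_kernel() helper is still used from the generic
header. The generic fallbacks look roughly like this (paraphrased, details
may vary by kernel version):

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	/* __pte_alloc_one_kernel(): one zeroed GFP_PGTABLE_KERNEL page */
	return __pte_alloc_one_kernel(mm);
}
#endif

#ifndef __HAVE_ARCH_PTE_FREE_KERNEL
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}
#endif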
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1402,7 +1402,7 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
table = pte_offset_kernel(pmdp, addr);
pmd_clear(pmdp);
__flush_tlb_kernel_pgtable(addr);
- pte_free_kernel(NULL, table);
+ pte_free_kernel(&init_mm, table);
return 1;
}
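(The NULL -> &init_mm change above is needed because pte_free_kernel() now
asserts via VM_BUG_ON() that it is only called on behalf of init_mm, as in
the hunk below; passing NULL would trip that assertion. Kernel page tables
always belong to init_mm, so the argument change is otherwise a no-op.)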
@@ -1709,3 +1709,25 @@ void pte_free(struct mm_struct *mm, struct page *pte_page)
pgtable_pte_page_dtor(pte_page);
__free_page(pte_page);
}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte = __pte_alloc_one_kernel(mm);
+
+ VM_BUG_ON(mm != &init_mm);
+
+ if (!pte)
+ return NULL;
+ if (page_tables_are_ro())
+ set_pgtable_ro(pte);
+ return pte;
+}
+
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ VM_BUG_ON(mm != &init_mm);
+
+ if (page_tables_are_ro())
+ set_pgtable_rw(pte);
+ free_page((u64)pte);
+}
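For context on how these hooks are reached: PTE-level kernel page tables
are allocated through __pte_alloc_kernel() in mm/memory.c, which always
operates on init_mm; that is what makes the VM_BUG_ON() checks above safe.
A rough paraphrase of that caller (details may differ between kernel
versions):

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm); /* arm64: linear alias r/o */

	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* has another CPU populated it already? */
		smp_wmb();		/* see comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);	/* arm64: restore r/w, then free */
	return 0;
}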