--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -18,10 +18,15 @@
 #define __HAVE_ARCH_PUD_FREE
 #define __HAVE_ARCH_PMD_ALLOC_ONE
 #define __HAVE_ARCH_PMD_FREE
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PTE_FREE
 #include <asm-generic/pgalloc.h>
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
+pgtable_t pte_alloc_one(struct mm_struct *mm);
+void pte_free(struct mm_struct *mm, struct page *pte_page);
+
 #if CONFIG_PGTABLE_LEVELS > 2
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -75,6 +75,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
 {
+	if (page_tables_are_ro())
+		set_pgtable_rw(page_address(pte));
 	pgtable_pte_page_dtor(pte);
 	tlb_remove_table(tlb, pte);
 }
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1686,3 +1686,26 @@ void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((u64)pmd);
 }
 #endif
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+	pgtable_t pgt = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+
+	VM_BUG_ON(mm == &init_mm);
+
+	if (!pgt)
+		return NULL;
+	if (page_tables_are_ro())
+		set_pgtable_ro(page_address(pgt));
+	return pgt;
+}
+
+void pte_free(struct mm_struct *mm, struct page *pte_page)
+{
+	VM_BUG_ON(mm == &init_mm);
+
+	if (page_tables_are_ro())
+		set_pgtable_rw(page_address(pte_page));
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
+}

Now that all PTE manipulations for user space tables go via the fixmap, we
can remap these tables read-only in the linear region so they cannot be
corrupted inadvertently.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/pgalloc.h |  5 +++++
 arch/arm64/include/asm/tlb.h     |  2 ++
 arch/arm64/mm/mmu.c              | 23 ++++++++++++++++++++
 3 files changed, 30 insertions(+)
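
Note: page_tables_are_ro(), set_pgtable_ro() and set_pgtable_rw() are
introduced by earlier patches in this series and are not shown here. As a
rough illustration only (not the implementation used by the series), helpers
of that shape could be built on a static key plus the generic
set_memory_ro()/set_memory_rw() API; the 'ro_page_tables' key name below is
made up for the sketch.

/* Illustrative sketch only -- not the helpers added by this series. */
#include <linux/jump_label.h>
#include <linux/set_memory.h>

/* Hypothetical static key, flipped once linear-map aliases of user page
 * tables have been made read-only. */
static DEFINE_STATIC_KEY_FALSE(ro_page_tables);

static bool page_tables_are_ro(void)
{
	return static_branch_likely(&ro_page_tables);
}

/* 'va' is the linear-map address of a single page table page. */
static void set_pgtable_ro(void *va)
{
	set_memory_ro((unsigned long)va, 1);
}

static void set_pgtable_rw(void *va)
{
	set_memory_rw((unsigned long)va, 1);
}

The real series may toggle the permission differently (e.g. by rewriting the
linear-map PTE through the fixmap); the sketch is only meant to show how the
allocation and free paths above flip a PTE-level table between r/o and r/w
around the generic ctor/dtor and free calls.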