@@ -16,12 +16,17 @@
 #define __HAVE_ARCH_PGD_FREE
 #define __HAVE_ARCH_PUD_ALLOC_ONE
 #define __HAVE_ARCH_PUD_FREE
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PMD_FREE
 #include <asm-generic/pgalloc.h>
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
 
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr);
+void pmd_free(struct mm_struct *mm, pmd_t *pmd);
+
 static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
 	set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -85,6 +85,8 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 {
 	struct page *page = virt_to_page(pmdp);
 
+	if (page_tables_are_ro())
+		set_pgtable_rw(pmdp);
 	pgtable_pmd_page_dtor(page);
 	tlb_remove_table(tlb, page);
 }
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1665,3 +1665,24 @@ void pud_free(struct mm_struct *mm, pud_t *pud)
 	free_page((u64)pud);
 }
 #endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	pmd_t *pmd = __pmd_alloc_one(mm, addr);
+
+	if (!pmd)
+		return NULL;
+	if (page_tables_are_ro())
+		set_pgtable_ro(pmd);
+	return pmd;
+}
+
+void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	if (page_tables_are_ro())
+		set_pgtable_rw(pmd);
+	pgtable_pmd_page_dtor(virt_to_page(pmd));
+	free_page((u64)pmd);
+}
+#endif
PMD modifications all go through the fixmap update routine, so there is
no longer a need to keep the PMD page mapped read/write in the linear
region.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/pgalloc.h |  5 +++++
 arch/arm64/include/asm/tlb.h     |  2 ++
 arch/arm64/mm/mmu.c              | 21 ++++++++++++++++++++
 3 files changed, 28 insertions(+)
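
Two pieces of context this patch relies on but does not show. First,
"going through the fixmap" means a PMD page is mapped briefly at a
fixed, writable virtual address, updated there, and unmapped again, so
its linear alias never needs write permission. A rough sketch of that
pattern, using the existing mmu.c-local pmd_set_fixmap() and
pmd_clear_fixmap() helpers (the wrapper name is invented for the
example, and barriers are elided):

/*
 * Illustrative only -- not part of this patch. The update is written
 * through a temporary r/w alias of the PMD page in the fixmap region,
 * not through the (now read-only) linear mapping.
 */
static void fixmap_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmd_t *dst = pmd_set_fixmap(__pa(pmdp));

	WRITE_ONCE(*dst, pmd);
	pmd_clear_fixmap();
}

Second, page_tables_are_ro(), set_pgtable_ro() and set_pgtable_rw()
are introduced by earlier patches in this series and are not shown
here. A minimal sketch of their intent, assuming the generic
set_memory_ro()/set_memory_rw() primitives and a hypothetical Kconfig
symbol (the real definitions in the series may differ):

static bool page_tables_are_ro(void)
{
	return IS_ENABLED(CONFIG_ARM64_RO_PAGE_TABLES);	/* hypothetical knob */
}

static void set_pgtable_ro(void *addr)
{
	/* drop write permission from the table's linear alias */
	set_memory_ro((unsigned long)addr, 1);
}

static void set_pgtable_rw(void *addr)
{
	/* restore write permission before the page is modified or freed */
	set_memory_rw((unsigned long)addr, 1);
}

Note that both pmd_free() and __pmd_free_tlb() flip the linear alias
back to read/write before the destructor and free run, so the page
returns to the allocator in a writable state.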