===8<===
arch/arm64/include/asm/pgtable-prot.h | 1 +
arch/arm64/mm/mmu.c | 36 +++++++++++++++++++++++++----------
2 files changed, 27 insertions(+), 10 deletions(-)
@@ -54,6 +54,7 @@
#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
+#define PAGE_KERNEL_INVALID __pgprot(0)
#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
@@ -140,7 +140,11 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
__prot = prot;
}
- set_pte(pte, pfn_pte(pfn, __prot));
+ if (pgprot_val(prot) & PTE_VALID)
+ set_pte(pte, pfn_pte(pfn, __prot));
+ else
+ pte_clear(NULL, NULL, pte);
+
pfn++;
/*
@@ -185,7 +189,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- !page_mappings_only) {
+ !page_mappings_only &&
+ (pmd_none(old_pmd) || pmd_sect(old_pmd))) {
/*
* Set the contiguous bit for the subsequent group of
* PMDs if its size and alignment are appropriate.
@@ -256,7 +261,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (use_1G_block(addr, next, phys) && !page_mappings_only) {
+ if (use_1G_block(addr, next, phys) && !page_mappings_only &&
+ (pud_none(old_pud) || pud_sect(old_pud))) {
pud_set_huge(pud, phys, prot);
/*
@@ -334,12 +340,10 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
}
-void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- pgprot_t prot, bool page_mappings_only)
+void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot, bool page_mappings_only)
{
- BUG_ON(mm == &init_mm);
-
__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
pgd_pgtable_alloc, page_mappings_only);
}
@@ -791,14 +795,26 @@ int __init arch_ioremap_pmd_supported(void)
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
BUG_ON(phys & ~PUD_MASK);
- set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+
+ if (pgprot_val(prot) & PTE_VALID)
+ set_pud(pud, __pud(phys | PUD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot))));
+ else
+ pud_clear(pud);
+
return 1;
}
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
BUG_ON(phys & ~PMD_MASK);
- set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+
+ if (pgprot_val(prot) & PTE_VALID)
+ set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot))));
+ else
+ pmd_clear(pmd);
+
return 1;
}
===>8===