@@ -195,6 +195,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
+	pte_t *pte;
 
 	pgd = pgd_offset(mm, addr);
 	pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
@@ -202,19 +203,29 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		return NULL;
 
 	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud))
+	if (pud_none(*pud) && sz != PUD_SIZE)
 		return NULL;
 	/* swap or huge page */
 	if (!pud_present(*pud) || pud_huge(*pud))
 		return (pte_t *)pud;
 	/* table; check the next level */
 
+	if (sz == CONT_PMD_SIZE)
+		addr &= CONT_PMD_MASK;
+
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	if (pmd_none(*pmd) &&
+	    !(sz == PMD_SIZE || sz == CONT_PMD_SIZE))
 		return NULL;
 	if (!pmd_present(*pmd) || pmd_huge(*pmd))
 		return (pte_t *)pmd;
 
+	if (sz == CONT_PTE_SIZE) {
+		pte = pte_offset_kernel(
+			pmd, (addr & CONT_PTE_MASK));
+		return pte;
+	}
+
 	return NULL;
 }
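As an aside on why the new code masks the address before returning an entry: every address inside a contiguous range should resolve to the same (head) entry. A small standalone sketch of the arithmetic follows, assuming the 4K-granule arm64 geometry (16 contiguous PTEs, 16 contiguous PMDs); the EXAMPLE_* constants are illustrative stand-ins for the kernel's CONT_* definitions, not values taken from the patch.

#include <stdio.h>

/*
 * Stand-ins for the arm64 CONT_* constants, assuming a 4K granule:
 * a contiguous PTE span covers 16 x 4K = 64K and a contiguous PMD
 * span covers 16 x 2M = 32M.
 */
#define EXAMPLE_CONT_PTE_SIZE	(16UL << 12)			/* 64K */
#define EXAMPLE_CONT_PTE_MASK	(~(EXAMPLE_CONT_PTE_SIZE - 1))
#define EXAMPLE_CONT_PMD_SIZE	(16UL << 21)			/* 32M */
#define EXAMPLE_CONT_PMD_MASK	(~(EXAMPLE_CONT_PMD_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x40212345UL;

	/*
	 * Masking rounds the address down to the first entry of the
	 * contiguous range, so the walker returns the same head entry
	 * for every address that falls inside that range.
	 */
	printf("addr          : %#lx\n", addr);
	printf("CONT_PTE head : %#lx\n", addr & EXAMPLE_CONT_PTE_MASK);
	printf("CONT_PMD head : %#lx\n", addr & EXAMPLE_CONT_PMD_MASK);
	return 0;
}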
huge_pte_offset() was updated to correctly handle swap entries for
hugepages. With the addition of the size parameter, it is now possible
to disambiguate whether the request is for a regular hugepage or a
contiguous hugepage.

Fix huge_pte_offset() for contiguous hugepages by using the size to
find the correct page table entry.

Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Cc: David Woods <dwoods@mellanox.com>
---
 arch/arm64/mm/hugetlbpage.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
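For context on where the size argument comes from, here is a minimal sketch of a lookup helper. The wrapper function lookup_huge_pte() is hypothetical, but hstate_vma(), huge_page_size() and the three-argument huge_pte_offset() are the interfaces this series builds on.

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Hypothetical helper showing how the size reaches huge_pte_offset().
 * Assumes the generic hugetlb code passes huge_page_size() for the
 * VMA's hstate, which on arm64 may be PUD_SIZE, CONT_PMD_SIZE,
 * PMD_SIZE or CONT_PTE_SIZE.
 */
static pte_t *lookup_huge_pte(struct vm_area_struct *vma, unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);

	return huge_pte_offset(vma->vm_mm, addr, huge_page_size(h));
}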