@@ -1553,6 +1553,75 @@ static int pagemap_pmd_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
return err;
}
+static int pagemap_pud_range(pud_t pud, pud_t *pudp, unsigned long addr,
+			     unsigned long end, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma = walk->vma;
+	struct pagemapread *pm = walk->private;
+	spinlock_t *ptl;
+	int err = 0;
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
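+	/* If the PUD maps a huge page, report the whole range under its lock. */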
+	ptl = pud_trans_huge_lock(pudp, vma);
+	if (ptl) {
+		u64 flags = 0, frame = 0;
+		struct page *page = NULL;
+
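+		/* The walker sampled the PUD before locking; retry if it changed. */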
+		if (memcmp(pudp, &pud, sizeof(pud)) != 0) {
+			walk->action = ACTION_AGAIN;
+			spin_unlock(ptl);
+			return 0;
+		}
+		if (vma->vm_flags & VM_SOFTDIRTY)
+			flags |= PM_SOFT_DIRTY;
+
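+		/* A present huge PUD maps one contiguous pfn range. */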
+		if (pud_present(pud)) {
+			page = pud_page(pud);
+
+			flags |= PM_PRESENT;
+			if (pud_soft_dirty(pud))
+				flags |= PM_SOFT_DIRTY;
+			if (pm->show_pfn)
+				frame = pud_pfn(pud) +
+					((addr & ~PUD_MASK) >> PAGE_SHIFT);
+		}
+
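+		/* The mapping is exclusive if the page is mapped only once. */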
+		if (page && page_mapcount(page) == 1)
+			flags |= PM_MMAP_EXCLUSIVE;
+
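+		/* Emit one pagemap entry for each small page in the range. */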
+		for (; addr != end; addr += PAGE_SIZE) {
+			pagemap_entry_t pme = make_pme(frame, flags);
+
+			err = add_to_pagemap(addr, &pme, pm);
+			if (err)
+				break;
+			if (pm->show_pfn) {
+				if (flags & PM_PRESENT)
+					frame++;
+				else if (flags & PM_SWAP)
+					frame += (1 << MAX_SWAPFILES_SHIFT);
+			}
+		}
+		spin_unlock(ptl);
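+		/* Handled at PUD level; tell the walker not to descend. */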
+		walk->action = ACTION_CONTINUE;
+		return err;
+	}
+
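+	/* Retry if the PUD is not a stable pointer to a lower page table. */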
+	if (pud_trans_unstable(&pud)) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+	return err;
+}
+
#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
@@ -1603,6 +1672,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
#endif /* HUGETLB_PAGE */
static const struct mm_walk_ops pagemap_ops = {
+	.pud_entry	= pagemap_pud_range,
.pmd_entry = pagemap_pmd_range,
.pte_hole = pagemap_pte_hole,
.hugetlb_entry = pagemap_hugetlb_range,