--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -687,6 +687,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE)
#define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd)))
+#define __pud_to_swp_entry(pud) (__pte_to_swp_entry(pud_pte(pud)))
#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))
#ifdef CONFIG_MEM_SOFT_DIRTY
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2371,6 +2371,10 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
+struct folio *vm_normal_folio_pud(struct vm_area_struct *vma,
+ unsigned long addr, pud_t pud);
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t pud);
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1990,4 +1990,10 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) \
} \
EXPORT_SYMBOL(vm_get_page_prot);
+#ifdef CONFIG_HUGETLB_PAGE
+#ifndef __pud_to_swp_entry
+#define __pud_to_swp_entry(pud) ((swp_entry_t) { pud_val(pud) })
+#endif
+#endif
+
#endif /* _LINUX_PGTABLE_H */
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -126,6 +126,21 @@ static inline int is_swap_pte(pte_t pte)
return !pte_none(pte) && !pte_present(pte);
}
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int is_swap_pud(pud_t pud)
+{
+ return !pud_none(pud) && !pud_present(pud);
+}
+
+static inline swp_entry_t pud_to_swp_entry(pud_t pud)
+{
+ swp_entry_t arch_entry;
+
+ arch_entry = __pud_to_swp_entry(pud);
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+#endif
+
/*
* Convert the arch-dependent pte representation of a swp_entry_t into an
* arch-independent swp_entry_t.
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -718,6 +718,57 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
}
#endif
+#ifdef CONFIG_HUGETLB_PAGE
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t pud)
+{
+ unsigned long pfn = pud_pfn(pud);
+
+ /*
+ * There is no pud_special() but there may be special puds, e.g.
+ * in a direct-access (dax) mapping, so let's just replicate the
+ * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
+ */
+ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ if (!pfn_valid(pfn))
+ return NULL;
+ goto out;
+ } else {
+ unsigned long off;
+
+ off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+ }
+
+ if (pud_devmap(pud))
+ return NULL;
+ if (unlikely(pfn > highest_memmap_pfn))
+ return NULL;
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+ */
+out:
+ return pfn_to_page(pfn);
+}
+
+struct folio *vm_normal_folio_pud(struct vm_area_struct *vma,
+ unsigned long addr, pud_t pud)
+{
+ struct page *page = vm_normal_page_pud(vma, addr, pud);
+
+ if (page)
+ return page_folio(page);
+ return NULL;
+}
+#endif
+
static void restore_exclusive_pte(struct vm_area_struct *vma,
struct page *page, unsigned long address,
pte_t *ptep)
HugeTLB pages will also be handled at the pud level, so we need to
implement pud versions of vm_normal_page()/vm_normal_folio()
(vm_normal_page_pud() and vm_normal_folio_pud()) together with the pud
swap helpers (is_swap_pud() and pud_to_swp_entry()).

Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h |  1 +
 include/linux/mm.h                           |  4 ++
 include/linux/pgtable.h                      |  6 +++
 include/linux/swapops.h                      | 15 ++++++
 mm/memory.c                                  | 51 ++++++++++++++++++++
 5 files changed, 77 insertions(+)
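
For reference, here is a minimal caller sketch showing how the new helpers
are intended to fit together in a pud-level walk. It is not part of the
patch: walk_one_pud() is a made-up example function, and it assumes
CONFIG_HUGETLB_PAGE=y (the new helpers are guarded by it). pud_none(),
pud_leaf(), is_migration_entry(), folio_order() and pr_debug() are
existing kernel helpers; is_swap_pud(), pud_to_swp_entry() and
vm_normal_folio_pud() are the ones introduced above.

#include <linux/mm.h>
#include <linux/swapops.h>

/*
 * Caller sketch only, not part of this patch: shows the present vs.
 * swap split that the new pud helpers provide.
 */
static int walk_one_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t pud)
{
	if (pud_none(pud))
		return 0;			/* nothing mapped here */

	if (is_swap_pud(pud)) {
		/* Non-present pud: decode the arch-independent entry. */
		swp_entry_t entry = pud_to_swp_entry(pud);

		if (is_migration_entry(entry))
			return -EAGAIN;		/* caller may retry */
		return 0;
	}

	if (pud_leaf(pud)) {
		/* Present leaf pud: map it back to a folio, if any. */
		struct folio *folio = vm_normal_folio_pud(vma, addr, pud);

		if (folio)
			pr_debug("pud maps folio of order %u\n",
				 folio_order(folio));
	}

	return 0;
}

A real hugetlb walker would of course take the appropriate page table
locks and use the huge_pte accessors around this; the sketch only
illustrates how the present and swap cases are told apart.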