@@ -168,7 +168,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
- pte_free(mm, pte);
+ pte_free(mm, page_ptdesc(pte));
mm_dec_nr_ptes(mm);
no_pmd:
pud_clear(pud);
@@ -39,9 +39,9 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
return get_pointer_table(TABLE_PTE);
}
-static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
+static inline void pte_free(struct mm_struct *mm, struct ptdesc *ptdesc)
{
- free_pointer_table(pgtable, TABLE_PTE);
+ free_pointer_table(ptdesc_page(ptdesc), TABLE_PTE);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
@@ -162,7 +162,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- pgtable_t pte_page)
+ struct ptdesc *pte_page)
{
*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}
@@ -40,7 +40,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
pte_fragment_free((unsigned long *)pte, 1);
}
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+static inline void pte_free(struct mm_struct *mm, struct ptdesc *ptepage)
{
pte_fragment_free((unsigned long *)ptepage, 0);
}
@@ -71,7 +71,7 @@ static inline void free_pte_fast(pte_t *pte)
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
-void pte_free(struct mm_struct * mm, pgtable_t pte);
+void pte_free(struct mm_struct *mm, struct ptdesc *pte);
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif /* _SPARC_PGALLOC_H */
@@ -365,7 +365,7 @@ struct ptdesc *pte_alloc_one(struct mm_struct *mm)
return (struct ptdesc *)ptep;
}
-void pte_free(struct mm_struct *mm, pgtable_t ptep)
+void pte_free(struct mm_struct *mm, struct ptdesc *ptep)
{
struct page *page;
@@ -1246,7 +1246,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
fallback:
if (ptdesc)
- pte_free(vma->vm_mm, ptdesc_page(ptdesc));
+ pte_free(vma->vm_mm, ptdesc);
trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
return VM_FAULT_FALLBACK;
}
@@ -105,10 +105,8 @@ static inline struct ptdesc *pte_alloc_one_noprof(struct mm_struct *mm)
* @mm: the mm_struct of the current context
-* @pte_page: the `struct page` referencing the ptdesc
+* @ptdesc: the ptdesc to free
*/
-static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
+static inline void pte_free(struct mm_struct *mm, struct ptdesc *ptdesc)
{
- struct ptdesc *ptdesc = page_ptdesc(pte_page);
-
pagetable_pte_dtor(ptdesc);
pagetable_free(ptdesc);
}
@@ -1049,7 +1049,7 @@ static void __init destroy_args(struct pgtable_debug_args *args)
/* Free page table entries */
if (args->start_ptep) {
- pte_free(args->mm, args->start_ptep);
+ pte_free(args->mm, page_ptdesc(args->start_ptep));
mm_dec_nr_ptes(args->mm);
}
@@ -987,7 +987,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
if (userfaultfd_missing(vma)) {
spin_unlock(vmf->ptl);
folio_put(folio);
- pte_free(vma->vm_mm, ptdesc_page(ptdesc));
+ pte_free(vma->vm_mm, ptdesc);
ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
return ret;
@@ -1013,7 +1013,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
spin_unlock(vmf->ptl);
release:
if (ptdesc)
- pte_free(vma->vm_mm, ptdesc_page(ptdesc));
+ pte_free(vma->vm_mm, ptdesc);
folio_put(folio);
return ret;
@@ -1096,7 +1096,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
return VM_FAULT_OOM;
zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
if (unlikely(!zero_folio)) {
- pte_free(vma->vm_mm, ptdesc_page(ptdesc));
+ pte_free(vma->vm_mm, ptdesc);
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
}
@@ -1106,10 +1106,10 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
ret = check_stable_address_space(vma->vm_mm);
if (ret) {
spin_unlock(vmf->ptl);
- pte_free(vma->vm_mm, ptdesc_page(ptdesc));
+ pte_free(vma->vm_mm, ptdesc);
} else if (userfaultfd_missing(vma)) {
spin_unlock(vmf->ptl);
- pte_free(vma->vm_mm, ptdesc_page(ptdesc));
+ pte_free(vma->vm_mm, ptdesc);
ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
} else {
@@ -1120,7 +1120,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
}
} else {
spin_unlock(vmf->ptl);
- pte_free(vma->vm_mm, ptdesc_page(ptdesc));
+ pte_free(vma->vm_mm, ptdesc);
}
return ret;
}
@@ -1178,7 +1178,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
out_unlock:
spin_unlock(ptl);
if (ptdesc)
- pte_free(mm, ptdesc_page(ptdesc));
+ pte_free(mm, ptdesc);
}
/**
@@ -1414,7 +1414,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
#endif
if (unlikely(!pmd_trans_huge(pmd))) {
- pte_free(dst_mm, ptdesc_page(ptdesc));
+ pte_free(dst_mm, ptdesc);
goto out_unlock;
}
/*
@@ -1440,7 +1440,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
/* Page maybe pinned: split and retry the fault on PTEs. */
folio_put(src_folio);
- pte_free(dst_mm, ptdesc_page(ptdesc));
+ pte_free(dst_mm, ptdesc);
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
@@ -1830,7 +1830,7 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
pgtable_t pgtable;
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
- pte_free(mm, pgtable);
+ pte_free(mm, page_ptdesc(pgtable));
mm_dec_nr_ptes(mm);
}
@@ -451,7 +451,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
pmd_install(mm, pmd, (pgtable_t *)&ptdesc);
if (ptdesc)
- pte_free(mm, ptdesc_page(ptdesc));
+ pte_free(mm, ptdesc);
return 0;
}
@@ -5196,7 +5196,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
/* preallocated pagetable is unused: free it */
if (vmf->prealloc_pte) {
- pte_free(vm_mm, vmf->prealloc_pte);
+ pte_free(vm_mm, page_ptdesc(vmf->prealloc_pte));
vmf->prealloc_pte = NULL;
}
return ret;
@@ -241,7 +241,7 @@ static void pte_free_now(struct rcu_head *head)
struct ptdesc *ptdesc;
ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
- pte_free(NULL /* mm not passed and not used */, (pgtable_t)ptdesc);
+ pte_free(NULL /* mm not passed and not used */, ptdesc);
}
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)