@@ -47,7 +47,7 @@ static inline void pte_free(struct mm_struct *mm, struct ptdesc *ptepage)
/* arch use pte_free_defer() implementation in arch/powerpc/mm/pgtable-frag.c */
#define pte_free_defer pte_free_defer
-void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+void pte_free_defer(struct mm_struct *mm, struct ptdesc *ptdesc);
/*
* Functions that deal with pagetables that could be at any level of
@@ -144,7 +144,7 @@ static inline void pmd_populate(struct mm_struct *mm,
/* arch use pte_free_defer() implementation in arch/s390/mm/pgalloc.c */
#define pte_free_defer pte_free_defer
-void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+void pte_free_defer(struct mm_struct *mm, struct ptdesc *ptdesc);
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
@@ -214,7 +214,6 @@ static void pte_free_now(struct rcu_head *head)
pagetable_pte_dtor_free(ptdesc);
}
-void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
+void pte_free_defer(struct mm_struct *mm, struct ptdesc *ptdesc)
{
- struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
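With the argument already a struct ptdesc *, the virt_to_ptdesc() conversion above is dead and is dropped. A minimal sketch of the resulting s390 function, assuming the rest of the body (not shown in this hunk) touches only the ptdesc and mm:

void pte_free_defer(struct mm_struct *mm, struct ptdesc *ptdesc)
{
	/* Free the pte table only after an RCU grace period has elapsed. */
	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
}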
@@ -116,7 +116,7 @@ static inline void pte_unmap(pte_t *pte)
}
#endif
-void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+void pte_free_defer(struct mm_struct *mm, struct ptdesc *ptdesc);
/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
@@ -1094,7 +1094,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
LIST_HEAD(compound_pagelist);
pmd_t *pmd, _pmd;
pte_t *pte;
- pgtable_t pgtable;
+ struct ptdesc *ptdesc;
struct folio *folio;
spinlock_t *pmd_ptl, *pte_ptl;
int result = SCAN_FAIL;
@@ -1223,7 +1223,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* write.
*/
__folio_mark_uptodate(folio);
- pgtable = pmd_pgtable(_pmd);
+ ptdesc = page_ptdesc(pmd_pgtable(_pmd));
_pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
@@ -1232,7 +1232,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
BUG_ON(!pmd_none(*pmd));
folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
- pgtable_trans_huge_deposit(mm, pmd, page_ptdesc(pgtable));
+ pgtable_trans_huge_deposit(mm, pmd, ptdesc);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
spin_unlock(pmd_ptl);
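Context for the deposit above: the pte table that used to map this range is stashed behind the new huge pmd so that a later split can retrieve it without allocating. Assuming the withdraw side is converted to ptdesc symmetrically elsewhere in this series, the pairing looks roughly like:

	/* at collapse: park the now-unused pte table behind the huge pmd */
	pgtable_trans_huge_deposit(mm, pmd, ptdesc);
	/* at split time: take it back to remap the range with ptes */
	ptdesc = pgtable_trans_huge_withdraw(mm, pmd);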
@@ -1664,7 +1664,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
mm_dec_nr_ptes(mm);
page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
- pte_free_defer(mm, pmd_pgtable(pgt_pmd));
+ pte_free_defer(mm, page_ptdesc(pmd_pgtable(pgt_pmd)));
maybe_install_pmd:
/* step 5: install pmd entry */
@@ -1777,7 +1777,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
if (retracted) {
mm_dec_nr_ptes(mm);
page_table_check_pte_clear_range(mm, addr, pgt_pmd);
- pte_free_defer(mm, pmd_pgtable(pgt_pmd));
+ pte_free_defer(mm, page_ptdesc(pmd_pgtable(pgt_pmd)));
}
}
i_mmap_unlock_read(mapping);
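A note on the conversion used in the khugepaged hunks above: pmd_ptdesc() in include/linux/mm.h maps a pointer into a PMD table to the ptdesc of the page containing that entry (it backs the split pmd lock), so applying it to the address of a stack copy such as pgt_pmd would yield the ptdesc of the stack page. Converting a pmd value to the pte table it points at goes through pmd_pgtable() instead, as sketched here (valid where pgtable_t is struct page *, which the removed page_ptdesc(pgtable) line already assumed):

	pmd_t *pmdp;	/* pointer into a PMD table */
	pmd_t pgt_pmd;	/* pmd value cleared from that table */

	/* ptdesc of the PMD table containing the entry (lock helper) */
	struct ptdesc *table = pmd_ptdesc(pmdp);
	/* ptdesc of the pte table the cleared entry pointed at */
	struct ptdesc *pte_table = page_ptdesc(pmd_pgtable(pgt_pmd));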
@@ -244,10 +244,8 @@ static void pte_free_now(struct rcu_head *head)
pte_free(NULL /* mm not passed and not used */, ptdesc);
}
-void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
+void pte_free_defer(struct mm_struct *mm, struct ptdesc *ptdesc)
{
- struct ptdesc *ptdesc = page_ptdesc(pgtable);
-
call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
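For reference, the generic pair after this change, assembled as a sketch from the hunk above (assuming pte_free_now() recovers the ptdesc from its embedded rcu_head via container_of(), as the call_rcu() usage implies):

static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pte_free(NULL /* mm not passed and not used */, ptdesc);
}

void pte_free_defer(struct mm_struct *mm, struct ptdesc *ptdesc)
{
	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
}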