@@ -91,16 +91,15 @@ pte_alloc_one_kernel(struct mm_struct *mm)
#define PGTABLE_HIGHMEM 0
#endif
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
+static inline struct ptdesc *pte_alloc_one(struct mm_struct *mm)
{
- struct page *pte;
+ struct ptdesc *pte;
pte = __pte_alloc_one(mm, GFP_PGTABLE_USER | PGTABLE_HIGHMEM);
if (!pte)
return NULL;
- if (!PageHighMem(pte))
- clean_pte_table(page_address(pte));
+ if (!PageHighMem(ptdesc_page(pte)))
+ clean_pte_table(ptdesc_address(pte));
return pte;
}
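
The ARM hunk above is the template for most architectures: __pte_alloc_one() now hands back a struct ptdesc *, and the highmem check and table address go through the ptdesc accessors instead of the struct page helpers. As a rough sketch (the real definitions in include/linux/mm.h use _Generic casts for const-correctness, which works because struct ptdesc overlays struct page):

	/* simplified view of the accessors used above */
	#define ptdesc_page(pt)		((struct page *)(pt))
	#define ptdesc_address(pt)	page_address(ptdesc_page(pt))
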
@@ -27,9 +27,9 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return (pte_t *)pte_fragment_alloc(mm, 1);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+static inline struct ptdesc *pte_alloc_one(struct mm_struct *mm)
{
- return (pgtable_t)pte_fragment_alloc(mm, 0);
+ return (struct ptdesc *)pte_fragment_alloc(mm, 0);
}
void pte_frag_destroy(void *pte_frag);
@@ -137,7 +137,7 @@ static inline void pmd_populate(struct mm_struct *mm,
* page table entry allocation/free routines.
*/
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
-#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))
+#define pte_alloc_one(mm) ((struct ptdesc *)page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
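
Note that the powerpc and s390 hunks are nominal conversions only: pte_fragment_alloc() returns a pointer into a sub-page fragment and page_table_alloc() returns the table's virtual address, so the casts above change the static type without producing a real descriptor. A caller that genuinely needed the owning descriptor would presumably have to derive it from the address, along these lines (hypothetical helper, not part of this patch):

	static inline struct ptdesc *table_to_ptdesc(void *table)
	{
		/* page-granular: any sub-page fragment offset is lost */
		return virt_to_ptdesc(table);
	}
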
@@ -55,7 +55,7 @@ static inline void free_pmd_fast(pmd_t * pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel pmd_populate
-pgtable_t pte_alloc_one(struct mm_struct *mm);
+struct ptdesc *pte_alloc_one(struct mm_struct *mm);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
@@ -61,7 +61,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-pgtable_t pte_alloc_one(struct mm_struct *mm);
+struct ptdesc *pte_alloc_one(struct mm_struct *mm);
void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
void pte_free(struct mm_struct *mm, pgtable_t ptepage);
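
The free side is left alone for now: pte_free() still takes pgtable_t, as the context line above shows, so allocation and free are temporarily asymmetric. A later step in the series would presumably convert the free path as well, along the lines of this hypothetical signature:

	void pte_free(struct mm_struct *mm, struct ptdesc *ptdesc);
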
@@ -2900,7 +2900,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return pte;
}
-pgtable_t pte_alloc_one(struct mm_struct *mm)
+struct ptdesc *pte_alloc_one(struct mm_struct *mm)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
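
Architectures that already allocate through pagetable_alloc(), as here, get a struct ptdesc * natively and need no cast. A minimal sketch of how the rest of such a function typically looks, assuming the one-argument constructor form (the exact body is elided from this hunk):

	struct ptdesc *pte_alloc_one(struct mm_struct *mm)
	{
		struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

		if (!ptdesc)
			return NULL;
		if (!pagetable_pte_ctor(ptdesc)) {
			pagetable_free(ptdesc);
			return NULL;
		}
		return ptdesc;
	}
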
@@ -346,7 +346,7 @@ pgd_t *get_pgd_fast(void)
* Alignments up to the page size are the same for physical and virtual
* addresses of the nocache area.
*/
-pgtable_t pte_alloc_one(struct mm_struct *mm)
+struct ptdesc *pte_alloc_one(struct mm_struct *mm)
{
pte_t *ptep;
struct page *page;
@@ -362,7 +362,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
}
spin_unlock(&mm->page_table_lock);
- return ptep;
+ return (struct ptdesc *)ptep;
}
void pte_free(struct mm_struct *mm, pgtable_t ptep)
@@ -51,7 +51,7 @@ extern gfp_t __userpte_alloc_gfp;
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pgtable_t pte_alloc_one(struct mm_struct *);
+extern struct ptdesc *pte_alloc_one(struct mm_struct *);
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
@@ -28,7 +28,7 @@ void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
-pgtable_t pte_alloc_one(struct mm_struct *mm)
+struct ptdesc *pte_alloc_one(struct mm_struct *mm)
{
return __pte_alloc_one(mm, __userpte_alloc_gfp);
}
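
On x86 the user PTE gfp mask is a tunable variable rather than a constant. For reference, the generic defaults expand as follows, with PGTABLE_HIGHMEM non-zero only under CONFIG_HIGHPTE:

	#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
	#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
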
@@ -51,15 +51,15 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return ptep;
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+static inline struct ptdesc *pte_alloc_one(struct mm_struct *mm)
{
- struct page *page;
+ struct ptdesc *ptdesc;
- page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
- if (!page)
+ ptdesc = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+ if (!ptdesc)
return NULL;
- ptes_clear(page_address(page));
- return page;
+ ptes_clear(ptdesc_address(ptdesc));
+ return ptdesc;
}
#endif /* CONFIG_MMU */
@@ -1222,7 +1222,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
DAX_PMD | DAX_ZERO_PAGE);
if (arch_needs_pgtable_deposit()) {
- ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+ ptdesc = pte_alloc_one(vma->vm_mm);
if (!ptdesc)
return VM_FAULT_OOM;
}
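
fs/dax.c no longer needs the page_ptdesc() wrapper since pte_alloc_one() returns the descriptor directly. The deposit API is still pgtable_t-based, though, so the conversion merely moves to the deposit call. A hypothetical factored-out helper to illustrate the assumed pattern:

	static void deposit_prealloc(struct mm_struct *mm, pmd_t *pmd,
				     struct ptdesc *ptdesc)
	{
		/* pgtable_trans_huge_deposit() still takes a pgtable_t */
		pgtable_trans_huge_deposit(mm, pmd, ptdesc_page(ptdesc));
	}
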
@@ -63,7 +63,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
*
- * Return: `struct page` referencing the ptdesc or %NULL on error
+ * Return: pointer to the allocated `struct ptdesc` or %NULL on error
*/
-static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
+static inline struct ptdesc *__pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
{
struct ptdesc *ptdesc;
@@ -75,7 +75,7 @@ static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
return NULL;
}
- return ptdesc_page(ptdesc);
+ return ptdesc;
}
#define __pte_alloc_one(...) alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
@@ -88,7 +88,7 @@ static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
*
- * Return: `struct page` referencing the ptdesc or %NULL on error
+ * Return: pointer to the allocated `struct ptdesc` or %NULL on error
*/
-static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
+static inline struct ptdesc *pte_alloc_one_noprof(struct mm_struct *mm)
{
return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
}
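
Both generic variants keep the _noprof/alloc_hooks() split used by memory allocation profiling: each function is defined once without profiling, and the macro wrapper tags the allocation against the caller. Sketched composition:

	/* callers expand through the hook so the codetag points at them */
	#define pte_alloc_one(...)	alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
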
@@ -959,7 +959,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
}
folio_throttle_swaprate(folio, gfp);
- ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+ ptdesc = pte_alloc_one(vma->vm_mm);
if (unlikely(!ptdesc)) {
ret = VM_FAULT_OOM;
goto release;
@@ -1091,7 +1091,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
struct folio *zero_folio;
vm_fault_t ret;
- ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+ ptdesc = pte_alloc_one(vma->vm_mm);
if (unlikely(!ptdesc))
return VM_FAULT_OOM;
zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
@@ -1213,7 +1213,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
return VM_FAULT_SIGBUS;
if (arch_needs_pgtable_deposit()) {
- ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+ ptdesc = pte_alloc_one(vma->vm_mm);
if (!ptdesc)
return VM_FAULT_OOM;
}
@@ -1376,7 +1376,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!vma_is_anonymous(dst_vma))
return 0;
- ptdesc = page_ptdesc(pte_alloc_one(dst_mm));
+ ptdesc = pte_alloc_one(dst_mm);
if (unlikely(!ptdesc))
goto out;
@@ -445,7 +445,7 @@ void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
- struct ptdesc *ptdesc = page_ptdesc(pte_alloc_one(mm));
+ struct ptdesc *ptdesc = pte_alloc_one(mm);
if (!ptdesc)
return -ENOMEM;
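
With pte_alloc_one() returning the descriptor, __pte_alloc() drops its page_ptdesc() round-trip. For context, the remainder of the function, which this hunk does not touch (reconstructed from the upstream code, so treat it as a sketch), installs the table and frees it again if another thread raced in:

	pgtable_t new = ptdesc_page(ptdesc);

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
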
@@ -4647,7 +4647,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
* # flush A, B to clear the writeback
*/
if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
- vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
+ vmf->prealloc_pte = ptdesc_page(pte_alloc_one(vma->vm_mm));
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
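
The vm_fault prealloc_pte field is still a pgtable_t, so the fault paths convert at the assignment; because ptdesc_page() is a plain cast, a NULL return propagates and the existing OOM check keeps working. The same pattern repeats in do_set_pmd() and do_fault_around() below; a hypothetical helper could factor it:

	static inline pgtable_t prealloc_pte_page(struct mm_struct *mm)
	{
		struct ptdesc *ptdesc = pte_alloc_one(mm);

		return ptdesc ? ptdesc_page(ptdesc) : NULL;
	}
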
@@ -4725,7 +4725,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* related to pte entry. Use the preallocated table for that.
*/
if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
- vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
+ vmf->prealloc_pte = ptdesc_page(pte_alloc_one(vma->vm_mm));
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
@@ -5010,7 +5010,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
pte_off + vma_pages(vmf->vma) - vma_off) - 1;
if (pmd_none(*vmf->pmd)) {
- vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
+ vmf->prealloc_pte = ptdesc_page(pte_alloc_one(vmf->vma->vm_mm));
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}