@@ -1206,7 +1206,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
         unsigned long pmd_addr = vmf->address & PMD_MASK;
         struct vm_area_struct *vma = vmf->vma;
         struct inode *inode = mapping->host;
-        pgtable_t pgtable = NULL;
+        struct ptdesc *ptdesc = NULL;
         struct folio *zero_folio;
         spinlock_t *ptl;
         pmd_t pmd_entry;
@@ -1222,8 +1222,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                                   DAX_PMD | DAX_ZERO_PAGE);
 
         if (arch_needs_pgtable_deposit()) {
-                pgtable = pte_alloc_one(vma->vm_mm);
-                if (!pgtable)
+                ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+                if (!ptdesc)
                         return VM_FAULT_OOM;
         }
 
@@ -1233,8 +1233,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                 goto fallback;
         }
 
-        if (pgtable) {
-                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+        if (ptdesc) {
+                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, ptdesc_page(ptdesc));
                 mm_inc_nr_ptes(vma->vm_mm);
         }
         pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
@@ -1245,8 +1245,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
         return VM_FAULT_NOPAGE;
 
 fallback:
-        if (pgtable)
-                pte_free(vma->vm_mm, pgtable);
+        if (ptdesc)
+                pte_free(vma->vm_mm, ptdesc_page(ptdesc));
         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
         return VM_FAULT_FALLBACK;
 }
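
For context on the helpers used above: struct ptdesc is the memory descriptor for page tables and currently shares its layout with struct page, so page_ptdesc() and ptdesc_page() are pure type conversions with no runtime cost. A rough sketch of the two helpers, paraphrased from include/linux/mm_types.h (exact definitions may differ between kernel versions):

        /*
         * struct ptdesc overlays struct page for now, so converting between
         * the two is just a cast; _Generic preserves constness.
         */
        #define ptdesc_page(pt)         (_Generic((pt),                 \
                const struct ptdesc *:  (const struct page *)(pt),      \
                struct ptdesc *:        (struct page *)(pt)))

        #define page_ptdesc(p)          (_Generic((p),                  \
                const struct page *:    (const struct ptdesc *)(p),     \
                struct page *:          (struct ptdesc *)(p)))

Because pte_alloc_one() still returns a pgtable_t and pgtable_trans_huge_deposit()/pte_free() still take one, the conversions sit at those call boundaries; only the function-local state switches to struct ptdesc in this patch.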