@@ -1087,16 +1087,16 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm) &&
transparent_hugepage_use_zero_page()) {
- pgtable_t pgtable;
+ struct ptdesc *ptdesc;
struct folio *zero_folio;
vm_fault_t ret;

- pgtable = pte_alloc_one(vma->vm_mm);
- if (unlikely(!pgtable))
+ ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+ if (unlikely(!ptdesc))
return VM_FAULT_OOM;
zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
if (unlikely(!zero_folio)) {
- pte_free(vma->vm_mm, pgtable);
+ pte_free(vma->vm_mm, ptdesc_page(ptdesc));
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
}
@@ -1106,21 +1106,21 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
ret = check_stable_address_space(vma->vm_mm);
if (ret) {
spin_unlock(vmf->ptl);
- pte_free(vma->vm_mm, pgtable);
+ pte_free(vma->vm_mm, ptdesc_page(ptdesc));
} else if (userfaultfd_missing(vma)) {
spin_unlock(vmf->ptl);
- pte_free(vma->vm_mm, pgtable);
+ pte_free(vma->vm_mm, ptdesc_page(ptdesc));
ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
} else {
- set_huge_zero_folio(pgtable, vma->vm_mm, vma,
+ set_huge_zero_folio(ptdesc_page(ptdesc), vma->vm_mm, vma,
haddr, vmf->pmd, zero_folio);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
spin_unlock(vmf->ptl);
}
} else {
spin_unlock(vmf->ptl);
- pte_free(vma->vm_mm, pgtable);
+ pte_free(vma->vm_mm, ptdesc_page(ptdesc));
}
return ret;
}
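
For reference, the two helpers this conversion relies on, page_ptdesc() and
ptdesc_page(), are pure type-punning casts between a struct page and the
struct ptdesc that overlays it. A simplified sketch follows (the mainline
definitions in include/linux/mm_types.h use _Generic() to preserve
const-ness):

	/*
	 * struct ptdesc overlays struct page for pages that back page
	 * tables, so each conversion is a cast, not a lookup.  NULL maps
	 * to NULL, which is why the !ptdesc check after
	 * page_ptdesc(pte_alloc_one(...)) above still catches an
	 * allocation failure.
	 */
	#define page_ptdesc(p)	((struct ptdesc *)(p))
	#define ptdesc_page(pt)	((struct page *)(pt))

The conversion is therefore free at runtime; the ptdesc_page() calls exist
only because pte_free() and set_huge_zero_folio() still take a pgtable_t
(a struct page * on most architectures) rather than a ptdesc.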