@@ -3453,7 +3453,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
 	}
 
 	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
-		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
+		pmd_install(mm, vmf->pmd, (struct ptdesc **)&vmf->prealloc_pte);
 
 	return false;
 }
@@ -320,7 +320,7 @@ void folio_activate(struct folio *folio);
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		   struct vm_area_struct *start_vma, unsigned long floor,
 		   unsigned long ceiling, bool mm_wr_locked);
-void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, struct ptdesc **pte);
 
 struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
@@ -418,7 +418,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 	} while (vma);
 }
 
-void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, struct ptdesc **pte)
 {
 	spinlock_t *ptl = pmd_lock(mm, pmd);
 
@@ -438,7 +438,7 @@ void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
 		 * smp_rmb() barriers in page table walking code.
 		 */
 		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
-		pmd_populate(mm, pmd, (struct ptdesc *)(*pte));
+		pmd_populate(mm, pmd, *pte);
 		*pte = NULL;
 	}
 	spin_unlock(ptl);
@@ -450,7 +450,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 	if (!ptdesc)
 		return -ENOMEM;
 
-	pmd_install(mm, pmd, (pgtable_t *)&ptdesc);
+	pmd_install(mm, pmd, &ptdesc);
 	if (ptdesc)
 		pte_free(mm, ptdesc);
 	return 0;
@@ -4868,7 +4868,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 		}
 
 		if (vmf->prealloc_pte)
-			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
+			pmd_install(vma->vm_mm, vmf->pmd, (struct ptdesc **)&vmf->prealloc_pte);
 		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
 			return VM_FAULT_OOM;
 	}
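
Taken together, these hunks switch pmd_install() from taking a pgtable_t * to taking a struct ptdesc **, and the callee clears the caller's pointer only in the branch where it actually populates the pmd; a caller such as __pte_alloc() therefore frees its preallocated table only if the pointer is still non-NULL afterwards. The following is a minimal userspace sketch of that ownership handoff, not kernel code: struct pmd, model_pmd_install(), model_pte_alloc() and the malloc()/free() stand-ins for pte_alloc_one()/pte_free() are illustrative assumptions made for this example.

/*
 * Userspace model (assumption: a single-threaded stand-in, not the kernel
 * API) of the pmd_install() ownership rule after the ptdesc conversion:
 * the callee consumes *pte and NULLs it only when it populates the pmd;
 * otherwise the caller keeps ownership and must free the table itself.
 */
#include <stdio.h>
#include <stdlib.h>

struct ptdesc { int unused; };          /* stand-in for the real struct ptdesc */
struct pmd { struct ptdesc *table; };   /* stand-in for a pmd entry            */

/* Models pmd_install(): populate the pmd only if it is still empty. */
static void model_pmd_install(struct pmd *pmd, struct ptdesc **pte)
{
	if (pmd->table == NULL) {       /* the pmd_none() case                 */
		pmd->table = *pte;      /* pmd_populate() equivalent           */
		*pte = NULL;            /* ownership moves to the page table   */
	}
	/* otherwise someone else populated it first; caller still owns *pte */
}

/* Models __pte_alloc(): allocate, try to install, free if not consumed. */
static int model_pte_alloc(struct pmd *pmd)
{
	struct ptdesc *ptdesc = malloc(sizeof(*ptdesc)); /* pte_alloc_one() stand-in */

	if (!ptdesc)
		return -1;                               /* -ENOMEM stand-in         */

	model_pmd_install(pmd, &ptdesc);
	if (ptdesc)                     /* still ours: the pmd was already set */
		free(ptdesc);           /* pte_free() stand-in                  */
	return 0;
}

int main(void)
{
	struct pmd pmd = { .table = NULL };

	model_pte_alloc(&pmd);          /* first call installs the table        */
	model_pte_alloc(&pmd);          /* second call finds it set and frees   */
	printf("pmd populated: %s\n", pmd.table ? "yes" : "no");
	free(pmd.table);
	return 0;
}

Passing the table by reference lets pmd_install() report consumption in place, so callers such as __pte_alloc() and finish_fault() need no extra return value to decide whether to free their preallocation.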