@@ -198,7 +198,7 @@ static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
 static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
 				    unsigned long address)
 {
-	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+	return pte_alloc(mm, pmd, address) ? NULL : pte_offset_huge(pmd, address);
 }
 #endif
 
@@ -2800,7 +2800,7 @@ static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
 #endif
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
 int __pte_alloc_kernel(pmd_t *pmd);
 
 #if defined(CONFIG_MMU)
@@ -2987,13 +2987,14 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdvalp,
 	pte_unmap(pte);					\
 } while (0)
 
-#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
+#define pte_alloc(mm, pmd, addr)				\
+	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, addr))
 
 #define pte_alloc_map(mm, pmd, address)			\
-	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
+	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
 
 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
-	(pte_alloc(mm, pmd) ?				\
+	(pte_alloc(mm, pmd, address) ?			\
 		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
 
 #define pte_alloc_kernel(pmd, address)			\
@@ -1246,7 +1246,7 @@ static int __init init_args(struct pgtable_debug_args *args)
 	args->start_pmdp = pmd_offset(args->pudp, 0UL);
 	WARN_ON(!args->start_pmdp);
 
-	if (pte_alloc(args->mm, args->pmdp)) {
+	if (pte_alloc(args->mm, args->pmdp, args->vaddr)) {
 		pr_err("Failed to allocate pte entries\n");
 		ret = -ENOMEM;
 		goto error;
@@ -3453,7 +3453,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
 	}
 
 	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
-		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
+		pmd_install(mm, vmf->pmd, vmf->address, &vmf->prealloc_pte);
 
 	return false;
 }
@@ -1105,7 +1105,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 		split_huge_pmd(vma, pmd, address);
 		/* If pmd was left empty, stuff a page table in there quickly */
-		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
+		return pte_alloc(mm, pmd, address) ? ERR_PTR(-ENOMEM) :
 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
 	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
@@ -320,7 +320,8 @@ void folio_activate(struct folio *folio);
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		   struct vm_area_struct *start_vma, unsigned long floor,
 		   unsigned long ceiling, bool mm_wr_locked);
-void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
+		 pgtable_t *pte);
 
 struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
@@ -417,7 +417,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 	} while (vma);
 }
 
-void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
+		 pgtable_t *pte)
 {
 	spinlock_t *ptl = pmd_lock(mm, pmd);
 
@@ -443,13 +444,13 @@ void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
 	spin_unlock(ptl);
 }
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
 {
 	pgtable_t new = pte_alloc_one(mm);
 	if (!new)
 		return -ENOMEM;
 
-	pmd_install(mm, pmd, &new);
+	pmd_install(mm, pmd, addr, &new);
 	if (new)
 		pte_free(mm, new);
 	return 0;
@@ -2115,7 +2116,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 
 	/* Allocate the PTE if necessary; takes PMD lock once only. */
 	ret = -ENOMEM;
-	if (pte_alloc(mm, pmd))
+	if (pte_alloc(mm, pmd, addr))
 		goto out;
 
 	while (pages_to_write_in_pmd) {
@@ -4521,7 +4522,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
 	 * be distinguished from a transient failure of pte_offset_map().
 	 */
-	if (pte_alloc(vma->vm_mm, vmf->pmd))
+	if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
 		return VM_FAULT_OOM;
 
 	/* Use the zero-page for reads */
@@ -4868,8 +4869,8 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 		}
 
 		if (vmf->prealloc_pte)
-			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
-		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
+			pmd_install(vma->vm_mm, vmf->pmd, vmf->address, &vmf->prealloc_pte);
+		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)))
 			return VM_FAULT_OOM;
 	}
 
@@ -598,7 +598,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto abort;
 	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
 		goto abort;
-	if (pte_alloc(mm, pmdp))
+	if (pte_alloc(mm, pmdp, addr))
 		goto abort;
 	if (unlikely(anon_vma_prepare(vma)))
 		goto abort;
@@ -330,11 +330,11 @@ pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
  * allocation failures during page faults by kicking OOM and returning
  * error.
  */
-#define change_pmd_prepare(vma, pmd, cp_flags)				\
+#define change_pmd_prepare(vma, pmd, addr, cp_flags)			\
 	({								\
 		long err = 0;						\
 		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
-			if (pte_alloc(vma->vm_mm, pmd))			\
+			if (pte_alloc(vma->vm_mm, pmd, addr))		\
 				err = -ENOMEM;				\
 		}							\
 		err;							\
@@ -375,7 +375,7 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 again:
 		next = pmd_addr_end(addr, end);
 
-		ret = change_pmd_prepare(vma, pmd, cp_flags);
+		ret = change_pmd_prepare(vma, pmd, addr, cp_flags);
 		if (ret) {
 			pages = ret;
 			break;
@@ -402,7 +402,7 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 				 * cleared; make sure pmd populated if
 				 * necessary, then fall-through to pte level.
 				 */
-				ret = change_pmd_prepare(vma, pmd, cp_flags);
+				ret = change_pmd_prepare(vma, pmd, addr, cp_flags);
 				if (ret) {
 					pages = ret;
 					break;
@@ -628,7 +628,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		}
 		if (pmd_none(*old_pmd))
 			continue;
-		if (pte_alloc(new_vma->vm_mm, new_pmd))
+		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
 			break;
 		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
 			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
@@ -796,7 +796,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 			break;
 		}
 		if (unlikely(pmd_none(dst_pmdval)) &&
-		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
+		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
 			err = -ENOMEM;
 			break;
 		}
@@ -1713,13 +1713,13 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
 				err = -ENOENT;
 				break;
 			}
-			if (unlikely(__pte_alloc(mm, src_pmd))) {
+			if (unlikely(__pte_alloc(mm, src_pmd, src_addr))) {
 				err = -ENOMEM;
 				break;
 			}
 		}
 
-		if (unlikely(pte_alloc(mm, dst_pmd))) {
+		if (unlikely(pte_alloc(mm, dst_pmd, dst_addr))) {
 			err = -ENOMEM;
 			break;
 		}
In the subsequent implementation of freeing empty page table pages, we
will need the address information in order to flush the TLB, so pass
the address down to pmd_install() in advance.

No functional changes.

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 include/linux/hugetlb.h |  2 +-
 include/linux/mm.h      |  9 +++++----
 mm/debug_vm_pgtable.c   |  2 +-
 mm/filemap.c            |  2 +-
 mm/gup.c                |  2 +-
 mm/internal.h           |  3 ++-
 mm/memory.c             | 15 ++++++++-------
 mm/migrate_device.c     |  2 +-
 mm/mprotect.c           |  8 ++++----
 mm/mremap.c             |  2 +-
 mm/userfaultfd.c        |  6 +++---
 11 files changed, 28 insertions(+), 25 deletions(-)
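
A note for reviewers on why every call site converts mechanically:
pte_alloc() keeps its short-circuit shape, so the new addr argument is
only evaluated when the PMD is empty and __pte_alloc() actually runs,
and the macro still returns nonzero only on allocation failure. The
toy userspace model below illustrates just that pattern; all names in
it (toy_pmd_none, toy_pte_alloc, toy_alloc) are made up for
illustration and are not kernel APIs.

/*
 * Toy model of the short-circuiting pte_alloc() macro; illustrative
 * stand-ins only, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

static bool toy_pmd_none = true;	/* models pmd_none(*pmd) */

static int toy_pte_alloc(unsigned long addr)
{
	/* 'addr' is carried along for a later TLB flush; unused so far. */
	printf("allocating page table for addr 0x%lx\n", addr);
	toy_pmd_none = false;		/* "install" the page table */
	return 0;			/* 0 on success, like __pte_alloc() */
}

/* Mirrors: (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, addr)) */
#define toy_alloc(addr)	(toy_pmd_none && toy_pte_alloc(addr))

int main(void)
{
	if (toy_alloc(0x1000UL))	/* allocates: the "pmd" was empty */
		return 1;		/* nonzero would mean -ENOMEM upstream */
	if (toy_alloc(0x2000UL))	/* short-circuits: prints nothing */
		return 1;
	return 0;
}

Because __pte_alloc() returns 0 on success and pmd_install() consumes
the preallocated table under the PMD lock, threading the extra
parameter through changes no control flow, which is what makes this a
no-functional-change conversion.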