| Message ID | 20231113152222.3495908-4-wangkefeng.wang@huawei.com |
| --- | --- |
| State | New |
| Series | mm: cleanup and use more folio in page fault |
On Mon, Nov 13, 2023 at 11:22:19PM +0800, Kefeng Wang wrote:
> Let's rename page_copy_prealloc() to folio_prealloc(), which could
> be reused in more functions. Since it may need to zero the new page,
> pass a new need_zero to it, and call vma_alloc_zeroed_movable_folio()
> if need_zero is true.

Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>

> Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/memory.c | 13 +++++++++----
>  1 file changed, 9 insertions(+), 4 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 379354b35891..d85df1c59f52 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -992,12 +992,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>  	return 0;
>  }
>
> -static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
> -		struct vm_area_struct *vma, unsigned long addr)
> +static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
> +		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
>  {
>  	struct folio *new_folio;
>
> -	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
> +	if (need_zero)
> +		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
> +	else
> +		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
> +					    addr, false);
> +
>  	if (!new_folio)
>  		return NULL;
>
> @@ -1129,7 +1134,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>  		} else if (ret == -EBUSY) {
>  			goto out;
>  		} else if (ret == -EAGAIN) {
> -			prealloc = page_copy_prealloc(src_mm, src_vma, addr);
> +			prealloc = folio_prealloc(src_mm, src_vma, addr, false);
>  			if (!prealloc)
>  				return -ENOMEM;
>  		} else if (ret) {
> --
> 2.27.0
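For context, the point of the new need_zero parameter is that later patches in this series can reuse folio_prealloc() where a pre-zeroed folio is wanted. A minimal sketch of what such a caller could look like, assuming only the signature shown in the diff above (the wrapper name here is hypothetical and not part of this patch):

```c
/*
 * Hypothetical caller, for illustration only: a fault path that needs
 * a zeroed destination folio passes need_zero == true, which makes
 * folio_prealloc() allocate via vma_alloc_zeroed_movable_folio()
 * instead of vma_alloc_folio(GFP_HIGHUSER_MOVABLE, ...).
 */
static struct folio *prealloc_zeroed_folio(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long addr)
{
	return folio_prealloc(mm, vma, addr, true);
}
```

The copy_pte_range() call site in the diff passes need_zero == false, preserving the old page_copy_prealloc() behavior exactly.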