Message ID | 20230703055850.227169-2-sidhartha.kumar@oracle.com (mailing list archive) |
---|---|
State | New |
Series | [1/4] mm/memory: convert do_page_mkwrite() to use folios |
On 2023/7/3 13:58, Sidhartha Kumar wrote:
> Saves five implicit calls to compound_head().
>
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>

Reviewed-by: ZhangPeng <zhangpeng362@huawei.com>

> ---
>  mm/memory.c | 13 +++++++------
>  1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 098fac2f5efc0..93480e846ace6 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3286,8 +3286,9 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
>  {
>  	struct vm_area_struct *vma = vmf->vma;
>  	vm_fault_t ret = 0;
> +	struct folio *folio = page_folio(vmf->page);
>
> -	get_page(vmf->page);
> +	folio_get(folio);
>
>  	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
>  		vm_fault_t tmp;
> @@ -3296,21 +3297,21 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
>  		tmp = do_page_mkwrite(vmf);
>  		if (unlikely(!tmp || (tmp &
>  				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
> -			put_page(vmf->page);
> +			folio_put(folio);
>  			return tmp;
>  		}
>  		tmp = finish_mkwrite_fault(vmf);
>  		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
> -			unlock_page(vmf->page);
> -			put_page(vmf->page);
> +			folio_unlock(folio);
> +			folio_put(folio);
>  			return tmp;
>  		}
>  	} else {
>  		wp_page_reuse(vmf);
> -		lock_page(vmf->page);
> +		folio_lock(folio);
>  	}
>  	ret |= fault_dirty_shared_page(vmf);
> -	put_page(vmf->page);
> +	folio_put(folio);
>
>  	return ret;
>  }
On Sun, Jul 02, 2023 at 10:58:48PM -0700, Sidhartha Kumar wrote:
> @@ -3296,21 +3297,21 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
> 		tmp = do_page_mkwrite(vmf);

A nice improvement to make after the series might be to pass (vmf, folio)
to save even more calls to compound_head().
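[Editor's note: as a rough sketch of that suggestion, a do_page_mkwrite() that takes the folio from its caller could look something like the following. The two-argument signature and the abridged body are assumptions for illustration only, not the actual helper in mm/memory.c; the swapfile check and other details of the real function are omitted.]

/*
 * Illustrative sketch only: a do_page_mkwrite() variant that receives the
 * folio from the caller, so the helper does not have to re-derive it via
 * page_folio()/compound_head().
 */
static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
{
	vm_fault_t ret;
	unsigned int old_flags = vmf->flags;

	vmf->flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE;
	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
	/* Restore original flags so that the caller is not surprised */
	vmf->flags = old_flags;
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
		return ret;
	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
		folio_lock(folio);
		if (!folio->mapping) {
			folio_unlock(folio);
			return 0;	/* Retry the fault */
		}
		ret |= VM_FAULT_LOCKED;
	}
	return ret;
}

The call site in wp_page_shared() would then become:

	tmp = do_page_mkwrite(vmf, folio);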
diff --git a/mm/memory.c b/mm/memory.c
index 098fac2f5efc0..93480e846ace6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3286,8 +3286,9 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret = 0;
+	struct folio *folio = page_folio(vmf->page);
 
-	get_page(vmf->page);
+	folio_get(folio);
 
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		vm_fault_t tmp;
@@ -3296,21 +3297,21 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp || (tmp &
 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(vmf->page);
+			folio_put(folio);
 			return tmp;
 		}
 		tmp = finish_mkwrite_fault(vmf);
 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
-			unlock_page(vmf->page);
-			put_page(vmf->page);
+			folio_unlock(folio);
+			folio_put(folio);
 			return tmp;
 		}
 	} else {
 		wp_page_reuse(vmf);
-		lock_page(vmf->page);
+		folio_lock(folio);
 	}
 	ret |= fault_dirty_shared_page(vmf);
-	put_page(vmf->page);
+	folio_put(folio);
 
 	return ret;
 }
Saves five implicit calls to compound_head().

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 mm/memory.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
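[Editor's note: as background for the "five implicit calls" count, the page-based helpers used by the old code each re-derive the folio (head page) from their page argument internally, roughly along the lines of the simplified sketch below. This is an illustration of the pattern only, not the exact definitions from include/linux/mm.h and mm/folio-compat.c.]

/* Simplified illustration of the page-based compat wrappers. */
static inline void get_page(struct page *page)
{
	/* page_folio() boils down to a compound_head() lookup */
	folio_get(page_folio(page));
}

static inline void put_page(struct page *page)
{
	folio_put(page_folio(page));
}

void lock_page(struct page *page)
{
	folio_lock(page_folio(page));
}

Resolving the folio once at the top of wp_page_shared() and calling the folio_* functions directly removes that per-call lookup.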