Message ID: 20190622000512.923867-1-songliubraving@fb.com (mailing list archive)
Series: Enable THP for text section of non-shmem files
Hello

On Fri, 21 Jun 2019 17:05:10 -0700 Song Liu <songliubraving@fb.com> wrote:
> Next patch will add khugepaged support of non-shmem files. This patch
> renames these two functions to reflect the new functionality:
>
> collapse_shmem() => collapse_file()
> khugepaged_scan_shmem() => khugepaged_scan_file()
>
> Acked-by: Rik van Riel <riel@surriel.com>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
> mm/khugepaged.c | 13 +++++++------
> 1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 0f7419938008..dde8e45552b3 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>  }
>
>  /**
> - * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
> + * collapse_file - collapse small tmpfs/shmem pages into huge one.
>   *
>   * Basic scheme is simple, details are more complex:
>   *  - allocate and lock a new huge page;
> @@ -1304,10 +1304,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>   *    + restore gaps in the page cache;
>   *    + unlock and free huge page;
>   */
> -static void collapse_shmem(struct mm_struct *mm,
> +static void collapse_file(struct vm_area_struct *vma,
>  		struct address_space *mapping, pgoff_t start,
>  		struct page **hpage, int node)
>  {
> +	struct mm_struct *mm = vma->vm_mm;
>  	gfp_t gfp;
>  	struct page *new_page;
>  	struct mem_cgroup *memcg;
> @@ -1563,7 +1564,7 @@ static void collapse_shmem(struct mm_struct *mm,
>  	/* TODO: tracepoints */
>  }
>
> -static void khugepaged_scan_shmem(struct mm_struct *mm,
> +static void khugepaged_scan_file(struct vm_area_struct *vma,
>  		struct address_space *mapping,
>  		pgoff_t start, struct page **hpage)
>  {
> @@ -1631,14 +1632,14 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
>  			result = SCAN_EXCEED_NONE_PTE;
>  		} else {
>  			node = khugepaged_find_target_node();
> -			collapse_shmem(mm, mapping, start, hpage, node);
> +			collapse_file(vma, mapping, start, hpage, node);
>  		}
>  	}
>
>  	/* TODO: tracepoints */
>  }
>  #else
> -static void khugepaged_scan_shmem(struct mm_struct *mm,
> +static void khugepaged_scan_file(struct vm_area_struct *vma,
>  		struct address_space *mapping,
>  		pgoff_t start, struct page **hpage)
>  {
> @@ -1722,7 +1723,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
>  			file = get_file(vma->vm_file);
>  			up_read(&mm->mmap_sem);
>  			ret = 1;
> -			khugepaged_scan_shmem(mm, file->f_mapping,
> +			khugepaged_scan_file(vma, file->f_mapping,
>  					pgoff, hpage);
>  			fput(file);

Is it a change that should have put some material in the log message?
Is it unlikely for vma to go without mmap_sem held?

>  		} else {
> --
> 2.17.1
>

Hillf
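[Editorial note: to make the locking question above easier to follow, here is a
minimal annotated sketch of the call sequence Hillf is pointing at, based on the
quoted khugepaged_scan_mm_slot() hunk. It is an illustration, not the verbatim
kernel source; surrounding code, declarations, and error handling are omitted.]

```c
/*
 * Simplified sketch of the sequence under discussion (from the quoted hunk).
 * With this patch, khugepaged_scan_file() takes the vma itself, but the vma
 * pointer is only guaranteed to stay valid while mm->mmap_sem is held.
 */
file = get_file(vma->vm_file);              /* pins the file, not the vma      */
up_read(&mm->mmap_sem);                     /* vma may now be unmapped/freed   */
ret = 1;
khugepaged_scan_file(vma, file->f_mapping,  /* vma dereferenced after unlock   */
		     pgoff, hpage);
fput(file);
```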
> On Jun 21, 2019, at 8:11 PM, Hillf Danton <hdanton@sina.com> wrote:
>
>
> Hello
>
> On Fri, 21 Jun 2019 17:05:10 -0700 Song Liu <songliubraving@fb.com> wrote:
>> Next patch will add khugepaged support of non-shmem files. This patch
>> renames these two functions to reflect the new functionality:
>>
>> collapse_shmem() => collapse_file()
>> khugepaged_scan_shmem() => khugepaged_scan_file()
>>
>> Acked-by: Rik van Riel <riel@surriel.com>
>> Signed-off-by: Song Liu <songliubraving@fb.com>
>> ---
>> mm/khugepaged.c | 13 +++++++------
>> 1 file changed, 7 insertions(+), 6 deletions(-)
>>
>> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
>> index 0f7419938008..dde8e45552b3 100644
>> --- a/mm/khugepaged.c
>> +++ b/mm/khugepaged.c
>> @@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>>  }
>>
>>  /**
>> - * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
>> + * collapse_file - collapse small tmpfs/shmem pages into huge one.
>>   *
>>   * Basic scheme is simple, details are more complex:
>>   *  - allocate and lock a new huge page;
>> @@ -1304,10 +1304,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>>   *    + restore gaps in the page cache;
>>   *    + unlock and free huge page;
>>   */
>> -static void collapse_shmem(struct mm_struct *mm,
>> +static void collapse_file(struct vm_area_struct *vma,
>>  		struct address_space *mapping, pgoff_t start,
>>  		struct page **hpage, int node)
>>  {
>> +	struct mm_struct *mm = vma->vm_mm;
>>  	gfp_t gfp;
>>  	struct page *new_page;
>>  	struct mem_cgroup *memcg;
>> @@ -1563,7 +1564,7 @@ static void collapse_shmem(struct mm_struct *mm,
>>  	/* TODO: tracepoints */
>>  }
>>
>> -static void khugepaged_scan_shmem(struct mm_struct *mm,
>> +static void khugepaged_scan_file(struct vm_area_struct *vma,
>>  		struct address_space *mapping,
>>  		pgoff_t start, struct page **hpage)
>>  {
>> @@ -1631,14 +1632,14 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
>>  			result = SCAN_EXCEED_NONE_PTE;
>>  		} else {
>>  			node = khugepaged_find_target_node();
>> -			collapse_shmem(mm, mapping, start, hpage, node);
>> +			collapse_file(vma, mapping, start, hpage, node);
>>  		}
>>  	}
>>
>>  	/* TODO: tracepoints */
>>  }
>>  #else
>> -static void khugepaged_scan_shmem(struct mm_struct *mm,
>> +static void khugepaged_scan_file(struct vm_area_struct *vma,
>>  		struct address_space *mapping,
>>  		pgoff_t start, struct page **hpage)
>>  {
>> @@ -1722,7 +1723,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
>>  			file = get_file(vma->vm_file);
>>  			up_read(&mm->mmap_sem);
>>  			ret = 1;
>> -			khugepaged_scan_shmem(mm, file->f_mapping,
>> +			khugepaged_scan_file(vma, file->f_mapping,
>>  					pgoff, hpage);
>>  			fput(file);
>
> Is it a change that should have put some material in the log message?
> Is it unlikely for vma to go without mmap_sem held?

This is a great point. We really need to be more careful. Let me fix it
in the next version.

Thanks,
Song
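[Editorial note: one way the concern could be addressed, sketched here purely as
a hypothetical (this is not taken from a later version of the series), is to
capture everything needed from the vma while mmap_sem is still held, so nothing
dereferences the vma pointer after up_read(). That would mean keeping an
mm-based signature for khugepaged_scan_file() rather than the vma-based one
introduced by this patch.]

```c
/*
 * Hypothetical sketch only -- not from the patch series. Everything taken
 * from the vma is read while mm->mmap_sem is still held for read; pgoff is
 * assumed to have been computed earlier, also under the lock.
 */
struct mm_struct *mm = vma->vm_mm;           /* mm stays valid across the unlock */
struct file *file = get_file(vma->vm_file);  /* pin the file before unlocking    */

up_read(&mm->mmap_sem);                      /* vma is not touched after this    */
ret = 1;
khugepaged_scan_file(mm, file->f_mapping,    /* pass mm, not the stale vma       */
		     pgoff, hpage);
fput(file);
```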