| Message ID | 20200219014433.88424-4-minchan@kernel.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | introduce memory hinting API for external process |
On Tue, Feb 18, 2020 at 5:44 PM Minchan Kim <minchan@kernel.org> wrote:
>
> Bail out to prevent unnecessary CPU overhead if target process has
> pending fatal signal during (MADV_COLD|MADV_PAGEOUT) operation.
>
> Signed-off-by: Minchan Kim <minchan@kernel.org>
> ---
>  mm/madvise.c | 29 +++++++++++++++++++++--------
>  1 file changed, 21 insertions(+), 8 deletions(-)
>
> [...]

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
```diff
diff --git a/mm/madvise.c b/mm/madvise.c
index f29155b8185d..def1507c2030 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -36,6 +36,7 @@
 struct madvise_walk_private {
         struct mmu_gather *tlb;
         bool pageout;
+        struct task_struct *target_task;
 };
 
 /*
@@ -316,6 +317,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
         if (fatal_signal_pending(current))
                 return -EINTR;
 
+        if (private->target_task &&
+                        fatal_signal_pending(private->target_task))
+                return -EINTR;
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
         if (pmd_trans_huge(*pmd)) {
                 pmd_t orig_pmd;
@@ -471,12 +476,14 @@ static const struct mm_walk_ops cold_walk_ops = {
 };
 
 static void madvise_cold_page_range(struct mmu_gather *tlb,
+                        struct task_struct *task,
                         struct vm_area_struct *vma,
                         unsigned long addr, unsigned long end)
 {
         struct madvise_walk_private walk_private = {
                 .pageout = false,
                 .tlb = tlb,
+                .target_task = task,
         };
 
         tlb_start_vma(tlb, vma);
@@ -484,7 +491,8 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
         tlb_end_vma(tlb, vma);
 }
 
-static long madvise_cold(struct vm_area_struct *vma,
+static long madvise_cold(struct task_struct *task,
+                        struct vm_area_struct *vma,
                         struct vm_area_struct **prev,
                         unsigned long start_addr, unsigned long end_addr)
 {
@@ -497,19 +505,21 @@ static long madvise_cold(struct vm_area_struct *vma,
 
         lru_add_drain();
         tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
-        madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
+        madvise_cold_page_range(&tlb, task, vma, start_addr, end_addr);
         tlb_finish_mmu(&tlb, start_addr, end_addr);
 
         return 0;
 }
 
 static void madvise_pageout_page_range(struct mmu_gather *tlb,
+                        struct task_struct *task,
                         struct vm_area_struct *vma,
                         unsigned long addr, unsigned long end)
 {
         struct madvise_walk_private walk_private = {
                 .pageout = true,
                 .tlb = tlb,
+                .target_task = task,
         };
 
         tlb_start_vma(tlb, vma);
@@ -533,7 +543,8 @@ static inline bool can_do_pageout(struct vm_area_struct *vma)
                 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
 }
 
-static long madvise_pageout(struct vm_area_struct *vma,
+static long madvise_pageout(struct task_struct *task,
+                        struct vm_area_struct *vma,
                         struct vm_area_struct **prev,
                         unsigned long start_addr, unsigned long end_addr)
 {
@@ -549,7 +560,7 @@ static long madvise_pageout(struct vm_area_struct *vma,
 
         lru_add_drain();
         tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
-        madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
+        madvise_pageout_page_range(&tlb, task, vma, start_addr, end_addr);
         tlb_finish_mmu(&tlb, start_addr, end_addr);
 
         return 0;
@@ -929,7 +940,8 @@ static int madvise_inject_error(int behavior,
 #endif
 
 static long
-madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
+madvise_vma(struct task_struct *task, struct vm_area_struct *vma,
+                struct vm_area_struct **prev,
                 unsigned long start, unsigned long end, int behavior)
 {
         switch (behavior) {
@@ -938,9 +950,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
         case MADV_WILLNEED:
                 return madvise_willneed(vma, prev, start, end);
         case MADV_COLD:
-                return madvise_cold(vma, prev, start, end);
+                return madvise_cold(task, vma, prev, start, end);
         case MADV_PAGEOUT:
-                return madvise_pageout(vma, prev, start, end);
+                return madvise_pageout(task, vma, prev, start, end);
         case MADV_FREE:
         case MADV_DONTNEED:
                 return madvise_dontneed_free(vma, prev, start, end, behavior);
@@ -1140,7 +1152,8 @@ int do_madvise(struct task_struct *target_task, struct mm_struct *mm,
                         tmp = end;
 
                 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
-                error = madvise_vma(vma, &prev, start, tmp, behavior);
+                error = madvise_vma(target_task, vma, &prev,
+                                start, tmp, behavior);
                 if (error)
                         goto out;
                 start = tmp;
```
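For context, MADV_COLD and MADV_PAGEOUT are the hints whose page-table walk this patch teaches to bail out early. Below is a minimal self-hinting sketch, not part of the series: it applies MADV_PAGEOUT to the caller's own anonymous mapping. It assumes a Linux 5.4+ kernel; the #define fallbacks are only for builds against older uapi headers.

```c
/*
 * Minimal sketch (not from this patch): apply MADV_PAGEOUT to our own
 * anonymous mapping. Requires Linux 5.4+; the #define fallbacks are
 * assumptions for older uapi headers.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLD
#define MADV_COLD	20	/* deactivate these pages */
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT	21	/* reclaim these pages now */
#endif

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0xaa, len);		/* fault everything in */

	/*
	 * Hint that the range is cold and should be reclaimed now.
	 * Contents are preserved; the next touch simply refaults the
	 * pages from swap or the page cache.
	 */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("madvise(MADV_PAGEOUT)");

	printf("first byte after pageout: 0x%02x\n", buf[0] & 0xff);
	munmap(buf, len);
	return 0;
}
```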
Bail out to prevent unnecessary CPU overhead if target process has
pending fatal signal during (MADV_COLD|MADV_PAGEOUT) operation.

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 mm/madvise.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)
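The target-task check matters because the caller here reclaims pages on behalf of another process: if that process is already dying, the walk is pure wasted CPU. A hedged sketch of the remote-hinting flow follows, written against the process_madvise(2) interface as it was eventually merged in Linux 5.10 rather than the exact signature proposed in this series revision; the syscall numbers are the x86-64 values, and the argv-based address range is a stand-in for parsing /proc/<pid>/maps.

```c
/*
 * Sketch only: hint a remote process's range as cold, using the
 * process_madvise(2) interface as eventually merged (Linux 5.10),
 * not the exact signature proposed in this series revision.
 * Syscall numbers below are the x86-64 values.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open		434
#endif
#ifndef __NR_process_madvise
#define __NR_process_madvise	440
#endif
#ifndef MADV_COLD
#define MADV_COLD		20
#endif

int main(int argc, char **argv)
{
	if (argc != 4) {
		fprintf(stderr, "usage: %s <pid> <hex-addr> <len>\n", argv[0]);
		return 1;
	}

	pid_t pid = (pid_t)strtol(argv[1], NULL, 10);
	struct iovec iov = {
		.iov_base = (void *)strtoul(argv[2], NULL, 16),
		.iov_len  = strtoul(argv[3], NULL, 10),
	};

	int pidfd = (int)syscall(__NR_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/*
	 * The kernel-side walk this patch touches runs in the calling
	 * task, but now also checks the *target's* pending fatal
	 * signals, so a dying target makes the walk return -EINTR
	 * early instead of burning CPU on pages about to be freed.
	 */
	ssize_t n = syscall(__NR_process_madvise, pidfd, &iov, 1,
			    MADV_COLD, 0);
	if (n < 0)
		perror("process_madvise");
	else
		printf("advised %zd bytes\n", n);

	close(pidfd);
	return 0;
}
```

Putting the check in the per-pmd walk callback, rather than once at entry, bounds the wasted work: a fatal signal can arrive at any point during a long range walk, and the walk gives up at the next pmd instead of finishing the whole range.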