Message ID | 20191010134058.11949-2-thomas_os@shipmail.org (mailing list archive) |
---|---|
State | New, archived |
Series | mm: pagewalk: Rework callback return values and optionally skip the pte level |
On Thu, Oct 10, 2019 at 03:40:55PM +0200, Thomas Hellström (VMware) wrote:
> From: Linus Torvalds <torvalds@linux-foundation.org>
>
> The pagewalk code is being reworked to have positive callback return codes
> do walk control. Avoid using positive return codes: "1" is replaced by
> "-EBUSY".
>
> Co-developed-by: Thomas Hellstrom <thellstrom@vmware.com>
> Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
> ---
>  mm/mempolicy.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 4ae967bcf954..df34c7498c27 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -482,8 +482,8 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
>   *
>   * queue_pages_pte_range() has three possible return values:
>   * 0 - pages are placed on the right node or queued successfully.
> - * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
> - *     specified.
> + * -EBUSY - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
> + *          specified.
>   * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
>   *        on a node that does not follow the policy.
>   */
> @@ -503,7 +503,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
>  	if (ptl) {
>  		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
>  		if (ret != 2)
> -			return ret;
> +			return (ret == 1) ? -EBUSY : ret;

It would be cleaner to propagate the error code logic to queue_pages_pmd()
too: 0 - placed, 1 - split, -EBUSY - unmovable, ...

>  	}
>  	/* THP was split, fall through to pte walk */
>
> @@ -546,7 +546,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
>  	cond_resched();
>
>  	if (has_unmovable)
> -		return 1;
> +		return -EBUSY;
>
>  	return addr != end ? -EIO : 0;
>  }
> @@ -669,9 +669,9 @@ static const struct mm_walk_ops queue_pages_walk_ops = {
>   * passed via @private.
>   *
>   * queue_pages_range() has three possible return values:
> - * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
> - *     specified.
>   * 0 - queue pages successfully or no misplaced page.
> + * -EBUSY - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
> + *          specified.
>   * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
>   */
>  static int
> @@ -1285,7 +1285,7 @@ static long do_mbind(unsigned long start, unsigned long len,
>  		ret = queue_pages_range(mm, start, end, nmask,
>  					flags | MPOL_MF_INVERT, &pagelist);
>
> -		if (ret < 0) {
> +		if (ret < 0 && ret != -EBUSY) {
>  			err = -EIO;
>  			goto up_out;
>  		}
> @@ -1303,7 +1303,7 @@ static long do_mbind(unsigned long start, unsigned long len,
>  			putback_movable_pages(&pagelist);
>  		}
>
> -		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
> +		if ((ret < 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
>  			err = -EIO;
>  	} else
>  		putback_movable_pages(&pagelist);
> --
> 2.21.0
>
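[Editor's note] A rough sketch of what that review comment could look like in code, assuming queue_pages_pmd() itself adopts the 0 (placed) / 1 (split) / -EBUSY (unmovable) / -EIO (misplaced) convention so the caller no longer translates return codes. This is illustrative only, not part of the posted patch; the elided bodies and the exact shape of queue_pages_pmd() are assumptions based on the mainline code of that era.

```c
/*
 * Hypothetical rework per the review comment, NOT the posted patch:
 *   0      - huge page on the right node or queued successfully
 *   1      - THP was split, caller falls through to the pte walk
 *   -EBUSY - unmovable page and MPOL_MF_MOVE* & MPOL_MF_STRICT specified
 *   -EIO   - misplaced page and only MPOL_MF_STRICT specified
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	...
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		return 1;		/* "THP was split", instead of 2 */
	}
	...
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = -EBUSY;	/* unmovable, instead of 1 */
			goto unlock;
		}
	}
	...
}

/* queue_pages_pte_range() would then need no special-casing: */
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 1)
			return ret;
	}
	/* THP was split, fall through to pte walk */
```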