Message ID: 20201120095445.1195585-3-daniel.vetter@ffwll.ch (mailing list archive)
State: New, archived
Series: mmu_notifier fs fs_reclaim lockdep annotations
Hi,

On 11/20/20 1:54 AM, Daniel Vetter wrote:
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index d5ece7a9a403..f94405d43fd1 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -180,6 +180,22 @@ static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
>  static inline void fs_reclaim_release(gfp_t gfp_mask) { }
>  #endif
>  
> +/**
> + * might_alloc - Marks possible allocation sites

	Mark

> + * @gfp_mask: gfp_t flags that would be use to allocate

	used

> + *
> + * Similar to might_sleep() and other annotations this can be used in functions

	annotations,

> + * that might allocate, but often dont. Compiles to nothing without

	don't.

> + * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.

?	might_sleep_if() if

> + */
> +static inline void might_alloc(gfp_t gfp_mask)
> +{
> +	fs_reclaim_acquire(gfp_mask);
> +	fs_reclaim_release(gfp_mask);
> +
> +	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
> +}
On Fri, Nov 20, 2020 at 6:20 PM Randy Dunlap <rdunlap@infradead.org> wrote:
>
> Hi,
>
> On 11/20/20 1:54 AM, Daniel Vetter wrote:
> > diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> > index d5ece7a9a403..f94405d43fd1 100644
> > --- a/include/linux/sched/mm.h
> > +++ b/include/linux/sched/mm.h
> > @@ -180,6 +180,22 @@ static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
> >  static inline void fs_reclaim_release(gfp_t gfp_mask) { }
> >  #endif
> >  
> > +/**
> > + * might_alloc - Marks possible allocation sites
>
> 	Mark
>
> > + * @gfp_mask: gfp_t flags that would be use to allocate
>
> 	used
>
> > + *
> > + * Similar to might_sleep() and other annotations this can be used in functions
>
> 	annotations,
>
> > + * that might allocate, but often dont. Compiles to nothing without
>
> 	don't.
>
> > + * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.
>
> ?	might_sleep_if() if

That's one if too many, I'll do the others for next round. Thanks for
taking a look.
-Daniel

> > + */
> > +static inline void might_alloc(gfp_t gfp_mask)
> > +{
> > +	fs_reclaim_acquire(gfp_mask);
> > +	fs_reclaim_release(gfp_mask);
> > +
> > +	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
> > +}
>
>
> --
> ~Randy
>
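For context on why such an annotation helps: a function that only sometimes
allocates would normally trip lockdep or the might_sleep() checks only on the
rare slow path that actually reaches the allocator, while might_alloc() makes
the check fire on every call. Below is a minimal sketch of such a caller; the
demo_dev structure, its one-slot buffer cache, and all demo_* names are
hypothetical, not from this series.

#include <linux/atomic.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

struct demo_dev {
	void *cached_buf;	/* one-slot cache, refilled on free */
	size_t buf_size;
};

/* May allocate, but the cached fast path usually avoids it. */
static void *demo_get_buffer(struct demo_dev *dev, gfp_t gfp)
{
	void *buf;

	/*
	 * Annotate up front: the calling context is validated against
	 * an allocation with @gfp on every invocation, even when the
	 * fast path below never reaches kmalloc().
	 */
	might_alloc(gfp);

	buf = xchg(&dev->cached_buf, NULL);	/* common case */
	if (buf)
		return buf;

	return kmalloc(dev->buf_size, gfp);	/* rare slow path */
}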
On Fri, Nov 20, 2020 at 02:07:19PM -0400, Jason Gunthorpe wrote:
> On Fri, Nov 20, 2020 at 10:54:43AM +0100, Daniel Vetter wrote:
> > diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> > index d5ece7a9a403..f94405d43fd1 100644
> > --- a/include/linux/sched/mm.h
> > +++ b/include/linux/sched/mm.h
> > @@ -180,6 +180,22 @@ static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
> >  static inline void fs_reclaim_release(gfp_t gfp_mask) { }
> >  #endif
> >  
> > +/**
> > + * might_alloc - Marks possible allocation sites
> > + * @gfp_mask: gfp_t flags that would be use to allocate
> > + *
> > + * Similar to might_sleep() and other annotations this can be used in functions
> > + * that might allocate, but often dont. Compiles to nothing without
> > + * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.
> > + */
> > +static inline void might_alloc(gfp_t gfp_mask)
> > +{
> > +	fs_reclaim_acquire(gfp_mask);
> > +	fs_reclaim_release(gfp_mask);
> > +
> > +	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
> > +}
>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>
> Oh, I just had a another thread with Matt about xarray, this would be
> perfect to add before xas_nomem():

Yeah I think there's plenty of places where this will be useful. Want
to slap a sob onto this diff so I can include it for the next round,
or will you or Matt send this out when my might_alloc has landed?
-Daniel

>
> diff --git a/lib/idr.c b/lib/idr.c
> index f4ab4f4aa3c7f5..722d9ddff53221 100644
> --- a/lib/idr.c
> +++ b/lib/idr.c
> @@ -391,6 +391,8 @@ int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
>  	if ((int)max < 0)
>  		max = INT_MAX;
>  
> +	might_alloc(gfp);
> +
>  retry:
>  	xas_lock_irqsave(&xas, flags);
>  next:
> diff --git a/lib/xarray.c b/lib/xarray.c
> index 5fa51614802ada..dd260ee7dcae9a 100644
> --- a/lib/xarray.c
> +++ b/lib/xarray.c
> @@ -1534,6 +1534,8 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
>  	XA_STATE(xas, xa, index);
>  	void *curr;
>  
> +	might_alloc(gfp);
> +
>  	if (WARN_ON_ONCE(xa_is_advanced(entry)))
>  		return XA_ERROR(-EINVAL);
>  	if (xa_track_free(xa) && !entry)
> @@ -1600,6 +1602,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
>  	XA_STATE(xas, xa, index);
>  	void *curr;
>  
> +	might_alloc(gfp);
> +
>  	if (WARN_ON_ONCE(xa_is_advanced(entry)))
>  		return XA_ERROR(-EINVAL);
>  
> @@ -1637,6 +1641,8 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
>  	XA_STATE(xas, xa, index);
>  	void *curr;
>  
> +	might_alloc(gfp);
> +
>  	if (WARN_ON_ONCE(xa_is_advanced(entry)))
>  		return -EINVAL;
>  	if (!entry)
> @@ -1806,6 +1812,8 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
>  {
>  	XA_STATE(xas, xa, 0);
>  
> +	might_alloc(gfp);
> +
>  	if (WARN_ON_ONCE(xa_is_advanced(entry)))
>  		return -EINVAL;
>  	if (WARN_ON_ONCE(!xa_track_free(xa)))
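An aside on what annotating these xarray entry points buys: once __xa_store()
and friends call might_alloc(), lockdep learns that any lock held around them
must never also be taken from the direct-reclaim path, and it reports the
inversion as soon as both orderings have each been seen once, whether or not
an allocation actually recursed into reclaim. A sketch of the bug class this
catches, assuming a hypothetical demo_lock shared between a store path and a
shrinker (all demo_* names are illustrative):

#include <linux/mutex.h>
#include <linux/shrinker.h>
#include <linux/xarray.h>

static DEFINE_MUTEX(demo_lock);

/*
 * Path 1: stores under demo_lock. xa_store() with GFP_KERNEL may enter
 * direct reclaim, and direct reclaim may wait on shrinkers.
 */
static int demo_publish(struct xarray *xa, unsigned long idx, void *p)
{
	void *old;

	mutex_lock(&demo_lock);
	old = xa_store(xa, idx, p, GFP_KERNEL);
	mutex_unlock(&demo_lock);

	return xa_err(old);
}

/*
 * Path 2: the shrinker, running from direct reclaim, takes the same
 * lock. If path 1's allocation is the one doing reclaim, the two paths
 * deadlock. With might_alloc() in __xa_store(), lockdep flags this
 * inversion deterministically instead of only under memory pressure.
 */
static unsigned long demo_shrink_scan(struct shrinker *s,
				      struct shrink_control *sc)
{
	unsigned long freed = 0;

	mutex_lock(&demo_lock);
	/* ... drop cached xarray entries, counting them in freed ... */
	mutex_unlock(&demo_lock);

	return freed;
}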
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index d5ece7a9a403..f94405d43fd1 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -180,6 +180,22 @@ static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
 static inline void fs_reclaim_release(gfp_t gfp_mask) { }
 #endif
 
+/**
+ * might_alloc - Marks possible allocation sites
+ * @gfp_mask: gfp_t flags that would be use to allocate
+ *
+ * Similar to might_sleep() and other annotations this can be used in functions
+ * that might allocate, but often dont. Compiles to nothing without
+ * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking.
+ */
+static inline void might_alloc(gfp_t gfp_mask)
+{
+	fs_reclaim_acquire(gfp_mask);
+	fs_reclaim_release(gfp_mask);
+
+	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+}
+
 /**
  * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
  *
diff --git a/mm/slab.h b/mm/slab.h
index 6d7c6a5056ba..37b981247e5d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -500,10 +500,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 {
 	flags &= gfp_allowed_mask;
 
-	fs_reclaim_acquire(flags);
-	fs_reclaim_release(flags);
-
-	might_sleep_if(gfpflags_allow_blocking(flags));
+	might_alloc(flags);
 
 	if (should_failslab(s, flags))
 		return NULL;
diff --git a/mm/slob.c b/mm/slob.c
index 7cc9805c8091..8d4bfa46247f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -474,8 +474,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 
 	gfp &= gfp_allowed_mask;
 
-	fs_reclaim_acquire(gfp);
-	fs_reclaim_release(gfp);
+	might_alloc(gfp);
 
 	if (size < PAGE_SIZE - minalign) {
 		int align = minalign;
@@ -597,8 +596,7 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	flags &= gfp_allowed_mask;
 
-	fs_reclaim_acquire(flags);
-	fs_reclaim_release(flags);
+	might_alloc(flags);
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
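For readers skimming the diff: the new helper is reproduced below with
comments spelling out what each half contributes. The code is verbatim from
the hunk above; only the comments are editorial.

static inline void might_alloc(gfp_t gfp_mask)
{
	/*
	 * Acquire and immediately release the fs_reclaim pseudo-lock (a
	 * real lockdep acquisition only when @gfp_mask can enter direct
	 * reclaim; empty stubs without CONFIG_LOCKDEP, per the #else
	 * branch above). This records every lock currently held as also
	 * held at a point that may recurse into reclaim.
	 */
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	/* Plus the usual sleeping-while-atomic check whenever the mask
	 * permits blocking.
	 */
	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}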