Message ID | 20220712133946.307181-7-42.hyeyoo@gmail.com (mailing list archive)
---|---
State | New
Series | common kmalloc v3
On 7/12/22 15:39, Hyeonggon Yoo wrote:
> There is no caller of kmalloc_order_trace() except kmalloc_large().
> Fold it into kmalloc_large() and remove kmalloc_order{,_trace}().
> 
> Also add tracepoint in kmalloc_large() that was previously
> in kmalloc_order_trace().
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Hmm noticed a small change in how we call trace_kmalloc() which will now
include the __GFP_COMP. I think we could just call alloc_pages() from
kmalloc_large() with flags | __GFP_COMP instead of doing
flags |= __GFP_COMP; first. AFAICS both kasan_kmalloc_large() and
kmemleak_alloc() will filter it out anyway.

> ---
>  include/linux/slab.h | 22 ++--------------------
>  mm/slab_common.c     | 14 +++-----------
>  2 files changed, 5 insertions(+), 31 deletions(-)
> 
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index a0e57df3d5a4..15a4c59da59e 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -489,26 +489,8 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
>  }
>  #endif /* CONFIG_TRACING */
>  
> -extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
> -							      __alloc_size(1);
> -
> -#ifdef CONFIG_TRACING
> -extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
> -				__assume_page_alignment __alloc_size(1);
> -#else
> -static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
> -								 unsigned int order)
> -{
> -	return kmalloc_order(size, flags, order);
> -}
> -#endif
> -
> -static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
> -{
> -	unsigned int order = get_order(size);
> -	return kmalloc_order_trace(size, flags, order);
> -}
> -
> +void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
> +					      __alloc_size(1);
>  /**
>   * kmalloc - allocate memory
>   * @size: how many bytes of memory are required.
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 6c9aac5d8f4a..1f8af2106df0 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -932,10 +932,11 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
>   * directly to the page allocator. We use __GFP_COMP, because we will need to
>   * know the allocation order to free the pages properly in kfree.
>   */
> -void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
> +void *kmalloc_large(size_t size, gfp_t flags)
>  {
>  	void *ret = NULL;
>  	struct page *page;
> +	unsigned int order = get_order(size);
>  
>  	if (unlikely(flags & GFP_SLAB_BUG_MASK))
>  		flags = kmalloc_fix_flags(flags);
> @@ -950,19 +951,10 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
>  	ret = kasan_kmalloc_large(ret, size, flags);
>  	/* As ret might get tagged, call kmemleak hook after KASAN. */
>  	kmemleak_alloc(ret, size, 1, flags);
> -	return ret;
> -}
> -EXPORT_SYMBOL(kmalloc_order);
> -
> -#ifdef CONFIG_TRACING
> -void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
> -{
> -	void *ret = kmalloc_order(size, flags, order);
>  	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
>  	return ret;
>  }
> -EXPORT_SYMBOL(kmalloc_order_trace);
> -#endif
> +EXPORT_SYMBOL(kmalloc_large);
>  
>  #ifdef CONFIG_SLAB_FREELIST_RANDOM
>  /* Randomize a generic freelist */
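To make the suggestion above concrete, a minimal sketch of the two call
styles (context and error handling elided; an illustration, not code from
the series):

	/* as in the patch: flags is modified in place, so the hooks and
	 * trace_kmalloc() further down also see __GFP_COMP */
	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);

	/* as suggested: only the page allocator sees __GFP_COMP */
	page = alloc_pages(flags | __GFP_COMP, order);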
On Thu, Jul 28, 2022 at 05:23:29PM +0200, Vlastimil Babka wrote:
> On 7/12/22 15:39, Hyeonggon Yoo wrote:
> > There is no caller of kmalloc_order_trace() except kmalloc_large().
> > Fold it into kmalloc_large() and remove kmalloc_order{,_trace}().
> > 
> > Also add tracepoint in kmalloc_large() that was previously
> > in kmalloc_order_trace().
> > 
> > Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> > Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> 

Thank you for the review.

> Hmm noticed a small change in how we call trace_kmalloc() which will now
> include the __GFP_COMP.

Good catch.

> I think we could just call alloc_pages() from
> kmalloc_large() with flags | __GFP_COMP instead of doing
> flags |= __GFP_COMP; first.

I'll consider that in the next version.

> AFAICS both kasan_kmalloc_large() and kmemleak_alloc()
> will filter it out anyway.

AFAIK not passing __GFP_COMP to both kasan_kmalloc_large()
and kmemleak_alloc() will affect their behavior.

- kasan_kmalloc_large() only checks if the flags include __GFP_DIRECT_RECLAIM,
- kmemleak_alloc() only uses the flags to allocate a kmemleak_object from
  slab/pool, which does not require __GFP_COMP.

Simply calling
	alloc_pages() with flags | __GFP_COMP
	kasan_kmalloc_large() with flags
	kmemleak_alloc() with flags
	trace_kmalloc() with flags
will be okay.

BTW this is fixed after patch 9.

> > ---
> >  include/linux/slab.h | 22 ++--------------------
> >  mm/slab_common.c     | 14 +++-----------
> >  2 files changed, 5 insertions(+), 31 deletions(-)
> > 
> > diff --git a/include/linux/slab.h b/include/linux/slab.h
> > index a0e57df3d5a4..15a4c59da59e 100644
> > --- a/include/linux/slab.h
> > +++ b/include/linux/slab.h
> > @@ -489,26 +489,8 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
> >  }
> >  #endif /* CONFIG_TRACING */
> >  
> > -extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
> > -							      __alloc_size(1);
> > -
> > -#ifdef CONFIG_TRACING
> > -extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
> > -				__assume_page_alignment __alloc_size(1);
> > -#else
> > -static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
> > -								 unsigned int order)
> > -{
> > -	return kmalloc_order(size, flags, order);
> > -}
> > -#endif
> > -
> > -static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
> > -{
> > -	unsigned int order = get_order(size);
> > -	return kmalloc_order_trace(size, flags, order);
> > -}
> > -
> > +void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
> > +					      __alloc_size(1);
> >  /**
> >   * kmalloc - allocate memory
> >   * @size: how many bytes of memory are required.
> > diff --git a/mm/slab_common.c b/mm/slab_common.c
> > index 6c9aac5d8f4a..1f8af2106df0 100644
> > --- a/mm/slab_common.c
> > +++ b/mm/slab_common.c
> > @@ -932,10 +932,11 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
> >   * directly to the page allocator. We use __GFP_COMP, because we will need to
> >   * know the allocation order to free the pages properly in kfree.
> >   */
> > -void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
> > +void *kmalloc_large(size_t size, gfp_t flags)
> >  {
> >  	void *ret = NULL;
> >  	struct page *page;
> > +	unsigned int order = get_order(size);
> >  
> >  	if (unlikely(flags & GFP_SLAB_BUG_MASK))
> >  		flags = kmalloc_fix_flags(flags);
> > @@ -950,19 +951,10 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
> >  	ret = kasan_kmalloc_large(ret, size, flags);
> >  	/* As ret might get tagged, call kmemleak hook after KASAN. */
> >  	kmemleak_alloc(ret, size, 1, flags);
> > -	return ret;
> > -}
> > -EXPORT_SYMBOL(kmalloc_order);
> > -
> > -#ifdef CONFIG_TRACING
> > -void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
> > -{
> > -	void *ret = kmalloc_order(size, flags, order);
> >  	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
> >  	return ret;
> >  }
> > -EXPORT_SYMBOL(kmalloc_order_trace);
> > -#endif
> > +EXPORT_SYMBOL(kmalloc_large);
> >  
> >  #ifdef CONFIG_SLAB_FREELIST_RANDOM
> >  /* Randomize a generic freelist */
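For illustration, kmalloc_large() with the ordering described above would
look roughly like this. The middle of the function is reconstructed from
the mainline kmalloc_order() of that time; this is a sketch, not the
version eventually posted in the series:

	void *kmalloc_large(size_t size, gfp_t flags)
	{
		void *ret = NULL;
		struct page *page;
		unsigned int order = get_order(size);

		if (unlikely(flags & GFP_SLAB_BUG_MASK))
			flags = kmalloc_fix_flags(flags);

		/* __GFP_COMP is OR-ed in at the call site only */
		page = alloc_pages(flags | __GFP_COMP, order);
		if (likely(page)) {
			ret = page_address(page);
			mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
					      PAGE_SIZE << order);
		}
		ret = kasan_kmalloc_large(ret, size, flags);
		/* As ret might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc(ret, size, 1, flags);
		/* the trace event no longer includes __GFP_COMP */
		trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
		return ret;
	}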
On 8/1/22 15:26, Hyeonggon Yoo wrote:
> On Thu, Jul 28, 2022 at 05:23:29PM +0200, Vlastimil Babka wrote:
>> On 7/12/22 15:39, Hyeonggon Yoo wrote:
>> > There is no caller of kmalloc_order_trace() except kmalloc_large().
>> > Fold it into kmalloc_large() and remove kmalloc_order{,_trace}().
>> > 
>> > Also add tracepoint in kmalloc_large() that was previously
>> > in kmalloc_order_trace().
>> > 
>> > Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
>> > Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
>> 
> 
> Thank you for the review.
> 
>> Hmm noticed a small change in how we call trace_kmalloc() which will now
>> include the __GFP_COMP.
> 
> Good catch.
> 
>> I think we could just call alloc_pages() from
>> kmalloc_large() with flags | __GFP_COMP instead of doing
>> flags |= __GFP_COMP; first.
> 
> I'll consider that in the next version.
> 
>> AFAICS both kasan_kmalloc_large() and kmemleak_alloc()
>> will filter it out anyway.
> 
> AFAIK not passing __GFP_COMP to both kasan_kmalloc_large()
> and kmemleak_alloc() will affect their behavior.

You mean "will NOT affect...", right?

> - kasan_kmalloc_large() only checks if the flags include __GFP_DIRECT_RECLAIM,
> - kmemleak_alloc() only uses the flags to allocate a kmemleak_object from
>   slab/pool, which does not require __GFP_COMP.
> 
> Simply calling
> 	alloc_pages() with flags | __GFP_COMP
> 	kasan_kmalloc_large() with flags
> 	kmemleak_alloc() with flags
> 	trace_kmalloc() with flags
> will be okay.
> 
> BTW this is fixed after patch 9.

Ah, right. __kmalloc_large_node_notrace() does the same flags |= __GFP_COMP;
but the tracepoint is outside and unaffected.
On Mon, Aug 01, 2022 at 03:36:48PM +0200, Vlastimil Babka wrote:
> On 8/1/22 15:26, Hyeonggon Yoo wrote:
> > On Thu, Jul 28, 2022 at 05:23:29PM +0200, Vlastimil Babka wrote:
> >> On 7/12/22 15:39, Hyeonggon Yoo wrote:
> >> > There is no caller of kmalloc_order_trace() except kmalloc_large().
> >> > Fold it into kmalloc_large() and remove kmalloc_order{,_trace}().
> >> > 
> >> > Also add tracepoint in kmalloc_large() that was previously
> >> > in kmalloc_order_trace().
> >> > 
> >> > Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> >> > Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> >> 
> > 
> > Thank you for the review.
> > 
> >> Hmm noticed a small change in how we call trace_kmalloc() which will now
> >> include the __GFP_COMP.
> > 
> > Good catch.
> > 
> >> I think we could just call alloc_pages() from
> >> kmalloc_large() with flags | __GFP_COMP instead of doing
> >> flags |= __GFP_COMP; first.
> > 
> > I'll consider that in the next version.
> > 
> >> AFAICS both kasan_kmalloc_large() and kmemleak_alloc()
> >> will filter it out anyway.
> > 
> > AFAIK not passing __GFP_COMP to both kasan_kmalloc_large()
> > and kmemleak_alloc() will affect their behavior.
> 
> You mean "will NOT affect...", right?

Ah, yes, my mistake.

> > - kasan_kmalloc_large() only checks if the flags include __GFP_DIRECT_RECLAIM,
> > - kmemleak_alloc() only uses the flags to allocate a kmemleak_object from
> >   slab/pool, which does not require __GFP_COMP.
> > 
> > Simply calling
> > 	alloc_pages() with flags | __GFP_COMP
> > 	kasan_kmalloc_large() with flags
> > 	kmemleak_alloc() with flags
> > 	trace_kmalloc() with flags
> > will be okay.
> > 
> > BTW this is fixed after patch 9.
> 
> Ah, right. __kmalloc_large_node_notrace() does the same flags |=
> __GFP_COMP; but the tracepoint is outside and unaffected.

I'll reflect your suggestion anyway.
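For context, the structure referred to above looks roughly like this
(sketched from the discussion; the wrapper name and the tracepoint
signature are illustrative and may not match patch 9 of the series
exactly). Because gfp_t is passed by value, the helper's
flags |= __GFP_COMP cannot leak back to the caller:

	static void *__kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
	{
		struct page *page;
		void *ret = NULL;
		unsigned int order = get_order(size);

		/* modifies only the helper's private copy of flags */
		flags |= __GFP_COMP;
		page = alloc_pages_node(node, flags, order);
		if (likely(page))
			ret = page_address(page);
		return ret;
	}

	void *kmalloc_large_node(size_t size, gfp_t flags, int node)
	{
		void *ret = __kmalloc_large_node_notrace(size, flags, node);

		/* the caller's flags are unmodified here, so the trace
		 * event does not include __GFP_COMP */
		trace_kmalloc(_RET_IP_, ret, NULL, size,
			      PAGE_SIZE << get_order(size), flags);
		return ret;
	}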
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a0e57df3d5a4..15a4c59da59e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -489,26 +489,8 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
-							      __alloc_size(1);
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-				__assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
-								 unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+					      __alloc_size(1);
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6c9aac5d8f4a..1f8af2106df0 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -932,10 +932,11 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+void *kmalloc_large(size_t size, gfp_t flags)
 {
 	void *ret = NULL;
 	struct page *page;
+	unsigned int order = get_order(size);
 
 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 		flags = kmalloc_fix_flags(flags);
@@ -950,19 +951,10 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	ret = kasan_kmalloc_large(ret, size, flags);
 	/* As ret might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_order);
-
-#ifdef CONFIG_TRACING
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret = kmalloc_order(size, flags, order);
 	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_order_trace);
-#endif
+EXPORT_SYMBOL(kmalloc_large);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Randomize a generic freelist */
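For reference, callers rarely invoke kmalloc_large() directly; the
kmalloc() inline in include/linux/slab.h routes compile-time-constant
sizes above KMALLOC_MAX_CACHE_SIZE to it. A simplified sketch of the
mainline inline of that era (the kmalloc-cache index lookup is elided):

	static __always_inline void *kmalloc(size_t size, gfp_t flags)
	{
		if (__builtin_constant_p(size)) {
			if (size > KMALLOC_MAX_CACHE_SIZE)
				return kmalloc_large(size, flags);
			/* ...otherwise pick a kmalloc cache at compile time... */
		}
		return __kmalloc(size, flags);
	}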