Message ID | 20230202185801.4179599-4-edumazet@google.com (mailing list archive)
---|---
State | Superseded
Delegated to: | Netdev Maintainers
Series | net: core: use a dedicated kmem_cache for skb head allocs
Context | Check | Description |
---|---|---|
netdev/tree_selection | success | Clearly marked for net-next |
netdev/fixes_present | success | Fixes tag not required for -next series |
netdev/subject_prefix | success |
netdev/cover_letter | success | Series has a cover letter |
netdev/patch_count | success |
netdev/header_inline | success | No static functions without inline keyword in header files |
netdev/build_32bit | success | Errors and warnings before: 2 this patch: 2 |
netdev/cc_maintainers | success | CCed 5 of 5 maintainers |
netdev/build_clang | success | Errors and warnings before: 1 this patch: 1 |
netdev/module_param | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Signed-off-by tag matches author and committer |
netdev/check_selftest | success | No net selftest shell script |
netdev/verify_fixes | success | No Fixes tag |
netdev/build_allmodconfig_warn | success | Errors and warnings before: 2 this patch: 2 |
netdev/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 71 lines checked |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/source_inline | success | Was 0 now: 0 |
On Thu, Feb 2, 2023 at 1:58 PM Eric Dumazet <edumazet@google.com> wrote:
>
> All kmalloc_reserve() callers have to make the same computation;
> we can factorize it, to prepare for the following patch in the series.
>
> Signed-off-by: Eric Dumazet <edumazet@google.com>

Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a82df5289208d69716e60c5c1f201ec3ca50a258..ae0b2aa1f01e8060cc4fe69137e9bd98e44280cc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -478,17 +478,20 @@ EXPORT_SYMBOL(napi_build_skb);
  * may be used. Otherwise, the packet data may be discarded until enough
  * memory is free
  */
-static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
+static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 			     bool *pfmemalloc)
 {
-	void *obj;
 	bool ret_pfmemalloc = false;
+	unsigned int obj_size;
+	void *obj;
 
+	obj_size = SKB_HEAD_ALIGN(*size);
+	*size = obj_size = kmalloc_size_roundup(obj_size);
 	/*
 	 * Try a regular allocation, when that fails and we're not entitled
 	 * to the reserves, fail.
 	 */
-	obj = kmalloc_node_track_caller(size,
+	obj = kmalloc_node_track_caller(obj_size,
 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
 					node);
 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
@@ -496,7 +499,7 @@ static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
 
 	/* Try again but now we are using pfmemalloc reserves */
 	ret_pfmemalloc = true;
-	obj = kmalloc_node_track_caller(size, flags, node);
+	obj = kmalloc_node_track_caller(obj_size, flags, node);
 
 out:
 	if (pfmemalloc)
@@ -557,9 +560,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
 	 * Both skb->head and skb_shared_info are cache line aligned.
 	 */
-	size = SKB_HEAD_ALIGN(size);
-	size = kmalloc_size_roundup(size);
-	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
+	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
 	if (unlikely(!data))
 		goto nodata;
 	/* kmalloc_size_roundup() might give us more room than requested.
@@ -1931,9 +1932,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;
 
-	size = SKB_HEAD_ALIGN(size);
-	size = kmalloc_size_roundup(size);
-	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
 		goto nodata;
 	size = SKB_WITH_OVERHEAD(size);
@@ -6282,9 +6281,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;
 
-	size = SKB_HEAD_ALIGN(size);
-	size = kmalloc_size_roundup(size);
-	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
 		return -ENOMEM;
 	size = SKB_WITH_OVERHEAD(size);
@@ -6400,9 +6397,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;
 
-	size = SKB_HEAD_ALIGN(size);
-	size = kmalloc_size_roundup(size);
-	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
 		return -ENOMEM;
 	size = SKB_WITH_OVERHEAD(size);
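The heart of the change is that kmalloc_reserve() now takes `size` as an in/out parameter: the caller passes in the requested byte count, and the helper writes back how many bytes the rounded-up allocation actually provides. Below is a minimal userspace C sketch of that contract. `head_align()` and `size_roundup()` are simplified, hypothetical stand-ins for the kernel's SKB_HEAD_ALIGN() and kmalloc_size_roundup(), and the constants are assumptions for illustration only, not kernel values.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for SKB_HEAD_ALIGN(): round the request up to
 * a cache-line multiple and reserve room for a trailing metadata
 * struct. Both constants are assumptions for illustration. */
static unsigned int head_align(unsigned int size)
{
	const unsigned int cacheline = 64;
	const unsigned int shinfo_size = 320;

	return ((size + cacheline - 1) / cacheline) * cacheline + shinfo_size;
}

/* Hypothetical stand-in for kmalloc_size_roundup(): round up to the
 * next power-of-two bucket, mimicking slab allocation sizes. */
static unsigned int size_roundup(unsigned int size)
{
	unsigned int bucket = 32;

	while (bucket < size)
		bucket *= 2;
	return bucket;
}

/* The contract introduced by the patch: *size is input (requested
 * bytes) and output (bytes actually usable after rounding), so the
 * caller never repeats the size math. */
static void *reserve_alloc(unsigned int *size)
{
	unsigned int obj_size;

	obj_size = head_align(*size);
	*size = obj_size = size_roundup(obj_size);
	return malloc(obj_size);
}

int main(void)
{
	unsigned int size = 1500;	/* e.g. an MTU-sized request */
	void *data = reserve_alloc(&size);

	if (!data)
		return 1;
	printf("requested 1500 bytes, usable %u\n", size);
	free(data);
	return 0;
}
```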
All kmalloc_reserve() callers have to make the same computation;
we can factorize it, to prepare for the following patch in the series.

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 net/core/skbuff.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)
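Concretely, the diffstat nets out negative because each of the four call sites (__alloc_skb(), pskb_expand_head(), pskb_carve_inside_header(), pskb_carve_inside_nonlinear()) shrinks from the repeated three-line pattern to a single call, as the diff above shows:

```c
/* Before: every caller duplicated the size computation. */
size = SKB_HEAD_ALIGN(size);
size = kmalloc_size_roundup(size);
data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);

/* After: the helper performs it once and updates size in place. */
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
```

Centralizing the size math also means the follow-up patch can change the allocation strategy (per the series title, a dedicated kmem_cache for skb heads) in one place without touching any caller.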