| Message ID | 20230202185801.4179599-2-edumazet@google.com (mailing list archive) |
| --- | --- |
| State | Superseded |
| Delegated to | Netdev Maintainers |
| Series | net: core: use a dedicated kmem_cache for skb head allocs |
On Thu, Feb 2, 2023 at 1:58 PM Eric Dumazet <edumazet@google.com> wrote:
>
> We have many places using this expression:
>
> SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
>
> Use of SKB_HEAD_ALIGN() will allow us to clean them up.
>
> Signed-off-by: Eric Dumazet <edumazet@google.com>

Acked-by: Soheil Hassas Yeganeh <soheil@google.com>

> ---
>  include/linux/skbuff.h |  8 ++++++++
>  net/core/skbuff.c      | 18 ++++++------------
>  2 files changed, 14 insertions(+), 12 deletions(-)
> [snip: the quoted diff is identical to the patch shown below]
> --
> 2.39.1.456.gfc5497dd1b-goog
```diff
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5ba12185f43e311e37c9045763c3ee0efc274f2a..f2141b7e3940cee060e8443dbaa147b843eb43a0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,6 +255,14 @@
 #define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
 #define SKB_WITH_OVERHEAD(X)	\
 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, what is the minimal
+ * allocation needed, knowing struct skb_shared_info needs
+ * to be aligned.
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
 #define SKB_MAX_ORDER(X, ORDER)	\
 	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
 #define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bb79b4cb89db344d23609f93b2bcca5103f1e92d..b73de8fb0756c02cf9ba4b7e90854c9c17728463 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -558,8 +558,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
 	 * Both skb->head and skb_shared_info are cache line aligned.
 	 */
-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
 	osize = kmalloc_size_roundup(size);
 	data = kmalloc_reserve(osize, gfp_mask, node, &pfmemalloc);
 	if (unlikely(!data))
@@ -632,8 +631,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 		goto skb_success;
 	}
 
-	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	len = SKB_DATA_ALIGN(len);
+	len = SKB_HEAD_ALIGN(len);
 
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
@@ -732,8 +730,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
 		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
 	} else {
-		len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-		len = SKB_DATA_ALIGN(len);
+		len = SKB_HEAD_ALIGN(len);
 
 		data = page_frag_alloc(&nc->page, len, gfp_mask);
 		pfmemalloc = nc->page.pfmemalloc;
@@ -1936,8 +1933,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;
 
-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
 	size = kmalloc_size_roundup(size);
 	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
@@ -6288,8 +6284,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;
 
-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
 	size = kmalloc_size_roundup(size);
 	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
@@ -6407,8 +6402,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;
 
-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_HEAD_ALIGN(size);
 	size = kmalloc_size_roundup(size);
 	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
```
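One subtlety the conversion folds together: the old call sites rounded in two different orders. `__alloc_skb()` aligned the payload first and then added the aligned shared-info size, while `__netdev_alloc_skb()` and `__napi_alloc_skb()` added first and aligned second. Both orders yield the same total, because `SKB_DATA_ALIGN(sizeof(struct skb_shared_info))` is itself a multiple of `SMP_CACHE_BYTES`. A minimal userspace sketch of that equivalence, using stand-in values (the 64-byte cache line and 320-byte `skb_shared_info` below are assumptions for illustration; the real sizes depend on kernel config and architecture):

```c
/* Sketch only: stand-in sizes, not the kernel's actual values. */
#include <assert.h>

#define SMP_CACHE_BYTES	64	/* assumed cache line size */
#define SHINFO_SIZE	320	/* stand-in for sizeof(struct skb_shared_info) */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)

int main(void)
{
	for (unsigned long len = 0; len < 4096; len++) {
		/* __alloc_skb() order: align payload, then add aligned shinfo */
		unsigned long a = SKB_DATA_ALIGN(len) + SKB_DATA_ALIGN(SHINFO_SIZE);
		/* __netdev_alloc_skb() order: add aligned shinfo, then align */
		unsigned long b = SKB_DATA_ALIGN(len + SKB_DATA_ALIGN(SHINFO_SIZE));
		assert(a == b);	/* adding an already-aligned value never changes the rounding */
	}
	return 0;
}
```

This is why collapsing both patterns into `SKB_HEAD_ALIGN()` is behavior-preserving at every converted site.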
We have many places using this expression:

SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

Use of SKB_HEAD_ALIGN() will allow us to clean them up.

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 include/linux/skbuff.h |  8 ++++++++
 net/core/skbuff.c      | 18 ++++++------------
 2 files changed, 14 insertions(+), 12 deletions(-)
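To make the arithmetic concrete, here is a hedged example of what `SKB_HEAD_ALIGN()` yields for an MTU-sized payload, under the same stand-in assumptions as above (64-byte cache lines and a 320-byte `skb_shared_info`; the real values are config- and arch-dependent):

```c
#include <stdio.h>

/* Stand-ins for illustration only: SMP_CACHE_BYTES and
 * sizeof(struct skb_shared_info) vary with kernel config and arch;
 * 64 and 320 are typical on x86-64.
 */
#define SMP_CACHE_BYTES	64
#define SHINFO_SIZE	320
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_HEAD_ALIGN(X)	(SKB_DATA_ALIGN(X) + SKB_DATA_ALIGN(SHINFO_SIZE))

int main(void)
{
	/* 1500 rounds up to 1536; the 320-byte shared info is already
	 * cache-line aligned, so the head allocation needs 1856 bytes.
	 */
	printf("SKB_HEAD_ALIGN(1500) = %lu\n", SKB_HEAD_ALIGN(1500UL));
	return 0;
}
```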