skbuff: pass the result of data ksize to __build_skb_around

Avoid calling ksize() a second time inside __build_skb_around() by passing
the already-computed ksize of the data buffer into __build_skb_around().

An nginx stress test shows this change reduces ksize() CPU usage
and gives a small performance boost.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Li RongQing 2021-09-22 14:17:19 +08:00 committed by David S. Miller
parent db4278c55f
commit a5df6333f1
1 changed file with 5 additions and 3 deletions

View File

@@ -394,8 +394,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 {
 	struct kmem_cache *cache;
 	struct sk_buff *skb;
-	u8 *data;
+	unsigned int osize;
 	bool pfmemalloc;
+	u8 *data;
 
 	cache = (flags & SKB_ALLOC_FCLONE)
 		? skbuff_fclone_cache : skbuff_head_cache;
@@ -427,7 +428,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * Put skb_shared_info exactly at the end of allocated zone,
 	 * to allow max possible filling before reallocation.
 	 */
-	size = SKB_WITH_OVERHEAD(ksize(data));
+	osize = ksize(data);
+	size = SKB_WITH_OVERHEAD(osize);
 	prefetchw(data + size);
/* /*
@@ -436,7 +438,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	__build_skb_around(skb, data, 0);
+	__build_skb_around(skb, data, osize);
 	skb->pfmemalloc = pfmemalloc;
 	if (flags & SKB_ALLOC_FCLONE) {