net: factorize code in kmalloc_reserve()
All kmalloc_reserve() callers have to make the same computation; we can factorize it in kmalloc_reserve() itself, to prepare for the following patch in the series. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Soheil Hassas Yeganeh <soheil@google.com> Acked-by: Paolo Abeni <pabeni@redhat.com> Reviewed-by: Alexander Duyck <alexanderduyck@fb.com> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
65998d2bf8
commit
5c0e820cbb
|
@ -478,17 +478,20 @@ EXPORT_SYMBOL(napi_build_skb);
|
|||
* may be used. Otherwise, the packet data may be discarded until enough
|
||||
* memory is free
|
||||
*/
|
||||
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
|
||||
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
|
||||
bool *pfmemalloc)
|
||||
{
|
||||
void *obj;
|
||||
bool ret_pfmemalloc = false;
|
||||
unsigned int obj_size;
|
||||
void *obj;
|
||||
|
||||
obj_size = SKB_HEAD_ALIGN(*size);
|
||||
*size = obj_size = kmalloc_size_roundup(obj_size);
|
||||
/*
|
||||
* Try a regular allocation, when that fails and we're not entitled
|
||||
* to the reserves, fail.
|
||||
*/
|
||||
obj = kmalloc_node_track_caller(size,
|
||||
obj = kmalloc_node_track_caller(obj_size,
|
||||
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
|
||||
node);
|
||||
if (obj || !(gfp_pfmemalloc_allowed(flags)))
|
||||
|
@ -496,7 +499,7 @@ static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
|
|||
|
||||
/* Try again but now we are using pfmemalloc reserves */
|
||||
ret_pfmemalloc = true;
|
||||
obj = kmalloc_node_track_caller(size, flags, node);
|
||||
obj = kmalloc_node_track_caller(obj_size, flags, node);
|
||||
|
||||
out:
|
||||
if (pfmemalloc)
|
||||
|
@ -557,9 +560,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
|
|||
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
|
||||
* Both skb->head and skb_shared_info are cache line aligned.
|
||||
*/
|
||||
size = SKB_HEAD_ALIGN(size);
|
||||
size = kmalloc_size_roundup(size);
|
||||
data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
|
||||
data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
|
||||
if (unlikely(!data))
|
||||
goto nodata;
|
||||
/* kmalloc_size_roundup() might give us more room than requested.
|
||||
|
@ -1933,9 +1934,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
|
|||
if (skb_pfmemalloc(skb))
|
||||
gfp_mask |= __GFP_MEMALLOC;
|
||||
|
||||
size = SKB_HEAD_ALIGN(size);
|
||||
size = kmalloc_size_roundup(size);
|
||||
data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
|
||||
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
|
||||
if (!data)
|
||||
goto nodata;
|
||||
size = SKB_WITH_OVERHEAD(size);
|
||||
|
@ -6283,9 +6282,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
|
|||
if (skb_pfmemalloc(skb))
|
||||
gfp_mask |= __GFP_MEMALLOC;
|
||||
|
||||
size = SKB_HEAD_ALIGN(size);
|
||||
size = kmalloc_size_roundup(size);
|
||||
data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
|
||||
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
size = SKB_WITH_OVERHEAD(size);
|
||||
|
@ -6401,9 +6398,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
|
|||
if (skb_pfmemalloc(skb))
|
||||
gfp_mask |= __GFP_MEMALLOC;
|
||||
|
||||
size = SKB_HEAD_ALIGN(size);
|
||||
size = kmalloc_size_roundup(size);
|
||||
data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
|
||||
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
size = SKB_WITH_OVERHEAD(size);
|
||||
|
|
Loading…
Reference in New Issue