bpf: Add percpu allocation support to bpf_mem_alloc.
Extend bpf_mem_alloc to cache a free list of fixed-size per-cpu allocations. Once such a cache is created, bpf_mem_cache_alloc() returns per-cpu objects and bpf_mem_cache_free() frees them back into a global per-cpu pool after an RCU grace period has been observed. The per-cpu flavor of bpf_mem_alloc is going to be used by per-cpu hash maps.

The free list cache consists of tuples { llist_node, per-cpu pointer }. Unlike alloc_percpu(), which returns a per-cpu pointer, bpf_mem_cache_alloc() returns a pointer to a per-cpu pointer, and bpf_mem_cache_free() expects to receive that same pointer back.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-11-alexei.starovoitov@gmail.com
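[Editor's note] To make that calling convention concrete, here is a minimal sketch of one free-list element and of a caller using the returned handle. The struct and variable names are illustrative only (the implementation works on a bare two-slot object rather than a named struct), assuming a 64-bit kernel where LLIST_NODE_SZ == sizeof(void *):

	/* Illustrative layout of one element in the per-cpu free list: */
	struct pcpu_elem {
		struct llist_node node;	/* slot 0: links into free_llist */
		void __percpu *pptr;	/* slot 1: unit_size bytes per CPU */
	};

	/* The allocator returns a pointer just past the llist_node, i.e.
	 * a pointer to the per-cpu pointer, not the per-cpu pointer itself:
	 */
	void **p = bpf_mem_cache_alloc(&ma);	/* p points at elem->pptr */
	void *v = per_cpu_ptr(*(void __percpu **)p, cpu); /* one CPU's copy */
	/* ... use v ... */
	bpf_mem_cache_free(&ma, p);	/* must hand the same p back */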
parent 8d5a8011b3
commit 4ab67149f3
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -12,7 +12,7 @@ struct bpf_mem_alloc {
 	struct bpf_mem_cache __percpu *cache;
 };
 
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size);
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
 
 /* kmalloc/kfree equivalent: */
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -607,7 +607,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 			goto free_prealloc;
 		}
 	} else {
-		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size);
+		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
 		if (err)
 			goto free_map_locked;
 	}
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -101,6 +101,7 @@ struct bpf_mem_cache {
 	/* count of objects in free_llist */
 	int free_cnt;
 	int low_watermark, high_watermark, batch;
+	bool percpu;
 
 	struct rcu_head rcu;
 	struct llist_head free_by_rcu;
@@ -133,6 +134,19 @@ static void *__alloc(struct bpf_mem_cache *c, int node)
 	 */
 	gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;
 
+	if (c->percpu) {
+		void **obj = kmem_cache_alloc_node(c->kmem_cache, flags, node);
+		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
+
+		if (!obj || !pptr) {
+			free_percpu(pptr);
+			kfree(obj);
+			return NULL;
+		}
+		obj[1] = pptr;
+		return obj;
+	}
+
 	if (c->kmem_cache)
 		return kmem_cache_alloc_node(c->kmem_cache, flags, node);
 
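[Editor's note] The two slots that __alloc() fills in the percpu branch are easiest to read with the indices spelled out; the annotation below is editorial, not part of the patch:

	/* What the percpu branch of __alloc() builds:
	 *
	 *   obj[0] - left free for the struct llist_node that links the
	 *            element into free_llist (LLIST_NODE_SZ bytes)
	 *   obj[1] - the per-cpu pointer for the unit_size-byte region
	 *
	 * Hence the kmem_cache object size of LLIST_NODE_SZ + sizeof(void *)
	 * chosen in bpf_mem_alloc_init() below.  The error path relies on
	 * free_percpu(NULL) and kfree(NULL) both being no-ops.
	 */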
@@ -193,6 +207,12 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 
 static void free_one(struct bpf_mem_cache *c, void *obj)
 {
+	if (c->percpu) {
+		free_percpu(((void **)obj)[1]);
+		kmem_cache_free(c->kmem_cache, obj);
+		return;
+	}
+
 	if (c->kmem_cache)
 		kmem_cache_free(c->kmem_cache, obj);
 	else
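[Editor's note] free_one() is the exact mirror of the percpu branch in __alloc(); a side-by-side sketch of the pairing:

	/* __alloc():  obj    = kmem_cache_alloc_node(...);  tuple
	 *             obj[1] = __alloc_percpu_gfp(...);     per-cpu data
	 *
	 * free_one(): free_percpu(((void **)obj)[1]);       per-cpu data
	 *             kmem_cache_free(c->kmem_cache, obj);  tuple
	 *
	 * free_one() is reached only after an RCU grace period, which is
	 * the "after observing RCU grace period" from the commit message.
	 */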
@@ -332,21 +352,30 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
  * kmalloc/kfree. Max allocation size is 4096 in this case.
  * This is bpf_dynptr and bpf_kptr use case.
  */
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size)
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
 	struct bpf_mem_caches *cc, __percpu *pcc;
 	struct bpf_mem_cache *c, __percpu *pc;
-	struct kmem_cache *kmem_cache;
+	struct kmem_cache *kmem_cache = NULL;
 	struct obj_cgroup *objcg = NULL;
 	char buf[32];
-	int cpu, i;
+	int cpu, i, unit_size;
 
 	if (size) {
 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
 		if (!pc)
 			return -ENOMEM;
-		size += LLIST_NODE_SZ; /* room for llist_node */
+
+		if (percpu) {
+			unit_size = size;
+			/* room for llist_node and per-cpu pointer */
+			size = LLIST_NODE_SZ + sizeof(void *);
+		} else {
+			size += LLIST_NODE_SZ; /* room for llist_node */
+			unit_size = size;
+		}
+
 		snprintf(buf, sizeof(buf), "bpf-%u", size);
 		kmem_cache = kmem_cache_create(buf, size, 8, 0, NULL);
 		if (!kmem_cache) {
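[Editor's note] A worked example of the size bookkeeping above, assuming a 64-bit kernel (LLIST_NODE_SZ == 8):

	/* bpf_mem_alloc_init(ma, 16, true):
	 *   unit_size = 16;                          caller's per-cpu object size
	 *   size = LLIST_NODE_SZ + sizeof(void *);   8 + 8 = 16, the tuple
	 * so kmem_cache "bpf-16" holds the { llist_node, pptr } tuples and
	 * each __alloc_percpu_gfp(16, 8, ...) supplies the per-cpu data.
	 *
	 * bpf_mem_alloc_init(ma, 16, false):
	 *   size = 16 + LLIST_NODE_SZ = 24; unit_size = 24;
	 * one "bpf-24" object carries both the llist_node and the payload.
	 */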
@@ -359,14 +388,19 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size)
 		for_each_possible_cpu(cpu) {
 			c = per_cpu_ptr(pc, cpu);
 			c->kmem_cache = kmem_cache;
-			c->unit_size = size;
+			c->unit_size = unit_size;
 			c->objcg = objcg;
+			c->percpu = percpu;
 			prefill_mem_cache(c, cpu);
 		}
 		ma->cache = pc;
 		return 0;
 	}
 
+	/* size == 0 && percpu is an invalid combination */
+	if (WARN_ON_ONCE(percpu))
+		return -EINVAL;
+
 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
 	if (!pcc)
 		return -ENOMEM;
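[Editor's note] For context, the per-cpu hash map conversion that this flavor targets would initialize it roughly as follows; the field name pcpu_ma is hypothetical here, not introduced by this patch:

	/* Hypothetical call site in htab_map_alloc() for a per-cpu map: */
	err = bpf_mem_alloc_init(&htab->pcpu_ma,
				 round_up(htab->map.value_size, 8), true);
	if (err)
		goto free_map_locked;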