bpf: Add a hint to allocated objects.

To address an OOM issue when one cpu is allocating and another cpu is freeing, add
a target bpf_mem_cache hint to allocated objects, and when the local cpu's free_llist
overflows, free to that bpf_mem_cache. The hint addresses the OOM while
maintaining the same performance for the common case when alloc/free are done on the
same cpu.
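
For intuition, a minimal userspace sketch of that scheme follows. It is not the
kernel code; struct fake_cache, cache_alloc(), cache_free() and OVERFLOW_WATERMARK
are invented for illustration. Each object carries a hidden header recording the
cache that allocated it, every free updates the target hint from that header, and
on overflow the object goes back to the owning cache instead of piling up locally.

/*
 * Userspace sketch of the ownership-hint idea (illustrative only).
 */
#include <stdio.h>
#include <stdlib.h>

#define OVERFLOW_WATERMARK 4
#define LIST_CAP 64

struct fake_cache {
	const char *name;
	void *free_list[LIST_CAP];	/* stand-in for the per-cpu free_llist */
	int free_cnt;
	struct fake_cache *tgt;		/* hint: cache that owns the last freed object */
};

struct obj_hdr {
	struct fake_cache *owner;	/* stamped at allocation time */
};

static void *cache_alloc(struct fake_cache *c, size_t size)
{
	struct obj_hdr *hdr = malloc(sizeof(*hdr) + size);

	if (!hdr)
		return NULL;
	hdr->owner = c;			/* remember which cache allocated the object */
	return hdr + 1;
}

static void cache_free(struct fake_cache *c, void *ptr)
{
	struct obj_hdr *hdr = (struct obj_hdr *)ptr - 1;

	c->tgt = hdr->owner;		/* read the hint on every free */

	if (c->free_cnt < OVERFLOW_WATERMARK) {
		c->free_list[c->free_cnt++] = hdr;	/* common case: stays local */
		return;
	}
	/* Overflow: hand the object back to the cache that allocated it,
	 * so a cpu that only frees cannot accumulate another cpu's memory.
	 */
	c->tgt->free_list[c->tgt->free_cnt++] = hdr;
}

int main(void)
{
	struct fake_cache cpu0 = { .name = "cpu0" }, cpu1 = { .name = "cpu1" };
	void *objs[8];
	int i;

	for (i = 0; i < 8; i++)
		objs[i] = cache_alloc(&cpu0, 16);	/* "cpu0" allocates */
	for (i = 0; i < 8; i++)
		cache_free(&cpu1, objs[i]);		/* "cpu1" frees */

	printf("%s holds %d objects, %s holds %d objects\n",
	       cpu0.name, cpu0.free_cnt, cpu1.name, cpu1.free_cnt);
	return 0;
}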

Note that do_call_rcu_ttrace() now has to check the 'draining' flag in one more case,
since do_call_rcu_ttrace() can now be called for another cpu's bpf_mem_cache, not only
for the current cpu's.
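
As a rough illustration of why that extra check matters, here is a small sketch
using C11 atomics in place of the kernel's atomic_xchg()/READ_ONCE();
fake_cache_rcu, queue_deferred_free(), start_deferred_free() and free_pending_now()
are invented names, not kernel APIs.

/*
 * Sketch: losing the "callback in progress" race on a cache that is
 * draining means the pending objects must be freed immediately.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct fake_cache_rcu {
	atomic_int cb_in_progress;	/* stand-in for call_rcu_ttrace_in_progress */
	bool draining;			/* set while the cache is being destroyed */
};

static void free_pending_now(struct fake_cache_rcu *c) { (void)c; /* drop pending objects right away */ }
static void start_deferred_free(struct fake_cache_rcu *c) { (void)c; /* schedule grace-period free */ }

void queue_deferred_free(struct fake_cache_rcu *c)
{
	/* Only one caller at a time may own the deferred-free callback. */
	if (atomic_exchange(&c->cb_in_progress, 1)) {
		/* Lost the race. Because of the tgt hint, 'c' may be a remote
		 * cache that is being torn down, so when it is draining the
		 * pending objects must be freed now rather than left behind.
		 */
		if (c->draining)
			free_pending_now(c);
		return;
	}
	start_deferred_free(c);
}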

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-9-alexei.starovoitov@gmail.com

@@ -99,6 +99,7 @@ struct bpf_mem_cache {
 	int low_watermark, high_watermark, batch;
 	int percpu_size;
 	bool draining;
+	struct bpf_mem_cache *tgt;
 	/* list of objects to be freed after RCU tasks trace GP */
 	struct llist_head free_by_rcu_ttrace;
@@ -199,18 +200,11 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 	for (i = 0; i < cnt; i++) {
 		/*
-		 * free_by_rcu_ttrace is only manipulated by irq work refill_work().
-		 * IRQ works on the same CPU are called sequentially, so it is
-		 * safe to use __llist_del_first() here. If alloc_bulk() is
-		 * invoked by the initial prefill, there will be no running
-		 * refill_work(), so __llist_del_first() is fine as well.
-		 *
-		 * In most cases, objects on free_by_rcu_ttrace are from the same CPU.
-		 * If some objects come from other CPUs, it doesn't incur any
-		 * harm because NUMA_NO_NODE means the preference for current
-		 * numa node and it is not a guarantee.
+		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
+		 * done only by one CPU == current CPU. Other CPUs might
+		 * llist_add() and llist_del_all() in parallel.
 		 */
-		obj = __llist_del_first(&c->free_by_rcu_ttrace);
+		obj = llist_del_first(&c->free_by_rcu_ttrace);
 		if (!obj)
 			break;
 		add_obj_to_free_list(c, obj);
@@ -284,18 +278,23 @@ static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
 	 * Nothing races to add to free_by_rcu_ttrace list.
 	 */
-	__llist_add(llnode, &c->free_by_rcu_ttrace);
+	llist_add(llnode, &c->free_by_rcu_ttrace);
 }
 static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 {
 	struct llist_node *llnode, *t;
-	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1))
+	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
+		if (unlikely(READ_ONCE(c->draining))) {
+			llnode = llist_del_all(&c->free_by_rcu_ttrace);
+			free_all(llnode, !!c->percpu_size);
+		}
 		return;
+	}
 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
-	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu_ttrace))
+	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
 		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
 		 * It doesn't race with llist_del_all either.
 		 * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
@@ -318,10 +317,13 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 static void free_bulk(struct bpf_mem_cache *c)
 {
+	struct bpf_mem_cache *tgt = c->tgt;
 	struct llist_node *llnode, *t;
 	unsigned long flags;
 	int cnt;
+	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
 	do {
 		inc_active(c, &flags);
 		llnode = __llist_del_first(&c->free_llist);
@@ -331,13 +333,13 @@ static void free_bulk(struct bpf_mem_cache *c)
 			cnt = 0;
 		dec_active(c, flags);
 		if (llnode)
-			enque_to_free(c, llnode);
+			enque_to_free(tgt, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
 	/* and drain free_llist_extra */
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
-		enque_to_free(c, llnode);
-	do_call_rcu_ttrace(c);
+		enque_to_free(tgt, llnode);
+	do_call_rcu_ttrace(tgt);
 }
 static void bpf_mem_refill(struct irq_work *work)
@@ -436,6 +438,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 		c->unit_size = unit_size;
 		c->objcg = objcg;
 		c->percpu_size = percpu_size;
+		c->tgt = c;
 		prefill_mem_cache(c, cpu);
 	}
 	ma->cache = pc;
@@ -458,6 +461,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c = &cc->cache[i];
 			c->unit_size = sizes[i];
 			c->objcg = objcg;
+			c->tgt = c;
 			prefill_mem_cache(c, cpu);
 		}
 	}
@@ -476,7 +480,7 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
 	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
 	 * on these lists, so it is safe to use __llist_del_all().
 	 */
-	free_all(__llist_del_all(&c->free_by_rcu_ttrace), percpu);
+	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
 	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
 	free_all(__llist_del_all(&c->free_llist), percpu);
 	free_all(__llist_del_all(&c->free_llist_extra), percpu);
@@ -601,8 +605,10 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		llnode = __llist_del_first(&c->free_llist);
-		if (llnode)
+		if (llnode) {
 			cnt = --c->free_cnt;
+			*(struct bpf_mem_cache **)llnode = c;
+		}
 	}
 	local_dec(&c->active);
 	local_irq_restore(flags);
@@ -626,6 +632,12 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
+	/*
+	 * Remember bpf_mem_cache that allocated this object.
+	 * The hint is not accurate.
+	 */
+	c->tgt = *(struct bpf_mem_cache **)llnode;
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		__llist_add(llnode, &c->free_llist);