bpf: Allow reuse from waiting_for_gp_ttrace list.

alloc_bulk() can reuse elements from free_by_rcu_ttrace.
Let it reuse from waiting_for_gp_ttrace as well to avoid unnecessary kmalloc().

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20230706033447.54696-10-alexei.starovoitov@gmail.com
commit 04fabf00b4
parent 822fb26bdb
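For orientation before the diff, here is a minimal userspace sketch of the refill order this change extends. The struct, helper names, and main() below are simplified stand-ins invented for illustration, not the kernel code: the real alloc_bulk() works on lockless llist_node lists and uses kmalloc(), llist_del_first() and add_obj_to_free_list() as shown in the hunk that follows.

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

struct cache {
        struct node *free_llist;            /* elements ready to hand out */
        struct node *free_by_rcu_ttrace;    /* freed elements already reusable */
        struct node *waiting_for_gp_ttrace; /* freed elements still waiting for an RCU GP */
};

static struct node *pop(struct node **list)
{
        struct node *n = *list;

        if (n)
                *list = n->next;
        return n;
}

static void push(struct node **list, struct node *n)
{
        n->next = *list;
        *list = n;
}

/* Refill order after this patch: reuse from free_by_rcu_ttrace first,
 * then (new) from waiting_for_gp_ttrace, and only allocate fresh memory
 * for whatever is still missing.
 */
static void alloc_bulk_sketch(struct cache *c, int cnt)
{
        struct node *obj;
        int i = 0;

        for (; i < cnt; i++) {
                obj = pop(&c->free_by_rcu_ttrace);
                if (!obj)
                        break;
                push(&c->free_llist, obj);
        }
        for (; i < cnt; i++) {
                obj = pop(&c->waiting_for_gp_ttrace); /* the reuse added by this patch */
                if (!obj)
                        break;
                push(&c->free_llist, obj);
        }
        for (; i < cnt; i++) {
                obj = malloc(sizeof(*obj));           /* stands in for kmalloc() */
                if (!obj)
                        break;
                push(&c->free_llist, obj);
        }
}

int main(void)
{
        struct cache c = { 0 };
        struct node stale;

        push(&c.waiting_for_gp_ttrace, &stale);
        alloc_bulk_sketch(&c, 3); /* one element reused, two freshly allocated */

        for (struct node *n = c.free_llist; n; n = n->next)
                printf("free element at %p\n", (void *)n);
        return 0;
}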
@@ -212,6 +212,15 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 	if (i >= cnt)
 		return;
 
+	for (; i < cnt; i++) {
+		obj = llist_del_first(&c->waiting_for_gp_ttrace);
+		if (!obj)
+			break;
+		add_obj_to_free_list(c, obj);
+	}
+	if (i >= cnt)
+		return;
+
 	memcg = get_memcg(c);
 	old_memcg = set_active_memcg(memcg);
 	for (; i < cnt; i++) {
@@ -295,12 +304,7 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 
 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
-		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
-		 * It doesn't race with llist_del_all either.
-		 * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
-		 * from __free_rcu() and from drain_mem_cache().
-		 */
-		__llist_add(llnode, &c->waiting_for_gp_ttrace);
+		llist_add(llnode, &c->waiting_for_gp_ttrace);
 
 	if (unlikely(READ_ONCE(c->draining))) {
 		__free_rcu(&c->rcu_ttrace);
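A note on the second hunk: it also replaces __llist_add() with llist_add() and drops the comment asserting that nothing touches waiting_for_gp_ttrace concurrently. That assertion presumably no longer holds once alloc_bulk() may be pulling elements off the same list with llist_del_first() at the same time, so the lockless add variant is used instead of the non-atomic one.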