bpf: Refactor alloc_bulk().
Factor out inner body of alloc_bulk into separate helper. No functional changes.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-5-alexei.starovoitov@gmail.com
commit 05ae68656a
parent 9de3e81521
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -154,11 +154,35 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		/* In RT irq_work runs in per-cpu kthread, so disable
+		 * interrupts to avoid preemption and interrupts and
+		 * reduce the chance of bpf prog executing on this cpu
+		 * when active counter is busy.
+		 */
+		local_irq_save(flags);
+	/* alloc_bulk runs from irq_work which will not preempt a bpf
+	 * program that does unit_alloc/unit_free since IRQs are
+	 * disabled there. There is no race to increment 'active'
+	 * counter. It protects free_llist from corruption in case NMI
+	 * bpf prog preempted this loop.
+	 */
+	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	local_dec(&c->active);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
-	unsigned long flags;
 	void *obj;
 	int i;
 
@@ -188,25 +212,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 			if (!obj)
 				break;
 		}
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			/* In RT irq_work runs in per-cpu kthread, so disable
-			 * interrupts to avoid preemption and interrupts and
-			 * reduce the chance of bpf prog executing on this cpu
-			 * when active counter is busy.
-			 */
-			local_irq_save(flags);
-		/* alloc_bulk runs from irq_work which will not preempt a bpf
-		 * program that does unit_alloc/unit_free since IRQs are
-		 * disabled there. There is no race to increment 'active'
-		 * counter. It protects free_llist from corruption in case NMI
-		 * bpf prog preempted this loop.
-		 */
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-		__llist_add(obj, &c->free_llist);
-		c->free_cnt++;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		add_obj_to_free_list(c, obj);
 	}
 	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
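
With both hunks applied, the loop body of alloc_bulk() reduces to a single call into the new helper. The following is only a sketch assembled from the two hunks above, not the full upstream function: the code that obtains 'obj' for each iteration lies outside the hunks and is elided into a comment, and the placement of the get_memcg()/set_active_memcg() pair at the top of the function is an assumption inferred from the hunk header and the trailing context lines.

/* Sketch only: reassembled from the hunks above. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	void *obj;
	int i;

	memcg = get_memcg(c);			/* assumed pairing, implied by hunk header */
	old_memcg = set_active_memcg(memcg);
	for (i = 0; i < cnt; i++) {
		/* ... obtain 'obj' for this slot, break out of the loop on
		 * allocation failure (context not shown by the diff) ...
		 */
		add_obj_to_free_list(c, obj);	/* list insertion and 'active' protection now live here */
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}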