bpf: allow to disable bpf prog memory accounting
We can simply disable bpf prog memory accounting by not setting
__GFP_ACCOUNT.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Link: https://lore.kernel.org/r/20230210154734.4416-5-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit bf39650824
parent ee53cbfb1e
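The conversions below route every GFP mask through the bpf_memcg_flags()
helper introduced earlier in this series, so __GFP_ACCOUNT is only added
when BPF memcg accounting is enabled. A minimal sketch of such a helper,
assuming a memcg_bpf_enabled() predicate and a CONFIG_MEMCG_KMEM guard
(both are assumptions here, not quoted from this patch), could look like:

	/* Sketch only: illustrates the intent of bpf_memcg_flags(); the
	 * memcg_bpf_enabled() predicate and the CONFIG_MEMCG_KMEM guard
	 * are assumptions, not the exact code from this series.
	 */
	#ifdef CONFIG_MEMCG_KMEM
	static inline gfp_t bpf_memcg_flags(gfp_t flags)
	{
		/* Tag the allocation for memcg accounting only when enabled */
		if (memcg_bpf_enabled())
			return flags | __GFP_ACCOUNT;
		return flags;
	}
	#else
	static inline gfp_t bpf_memcg_flags(gfp_t flags)
	{
		return flags;
	}
	#endif

With that in place, disabling accounting simply means the masks passed to
kzalloc(), alloc_percpu_gfp() and kvcalloc() in the hunks below never carry
__GFP_ACCOUNT.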
@@ -35,6 +35,7 @@
 #include <linux/bpf_verifier.h>
 #include <linux/nodemask.h>
 #include <linux/bpf_mem_alloc.h>
+#include <linux/memcontrol.h>
 
 #include <asm/barrier.h>
 #include <asm/unaligned.h>
@@ -87,7 +88,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
 
 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
 {
-	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
 	struct bpf_prog_aux *aux;
 	struct bpf_prog *fp;
 
@@ -96,12 +97,12 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 	if (fp == NULL)
 		return NULL;
 
-	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+	aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
 	if (aux == NULL) {
 		vfree(fp);
 		return NULL;
 	}
-	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
 	if (!fp->active) {
 		vfree(fp);
 		kfree(aux);
@@ -126,7 +127,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 {
-	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
 	struct bpf_prog *prog;
 	int cpu;
 
@@ -159,7 +160,7 @@ int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 
 	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
 					  sizeof(*prog->aux->jited_linfo),
-					  GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+					  bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
 	if (!prog->aux->jited_linfo)
 		return -ENOMEM;
 
@@ -234,7 +235,7 @@ void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 				  gfp_t gfp_extra_flags)
 {
-	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
 	struct bpf_prog *fp;
 	u32 pages;
 