Merge branch 'bpf-cgroup-multi-prog'
Alexei Starovoitov says:

====================
bpf: multi prog support for cgroup-bpf

v1->v2:
- fixed two accidentally swapped lines, which caused the static_key not to go back to zero
- addressed Martin's feedback and changed prog_query to be consistent with verifier output: return -ENOSPC and fill the supplied buffer, instead of just returning -ENOSPC when the buffer is too small to fit all prog_ids

v1:
cgroup-bpf use cases are getting more advanced, and running only one program per cgroup is no longer enough. Therefore introduce support for attaching multiple programs per cgroup and running a set of effective programs.

These patches introduce the BPF_F_ALLOW_MULTI flag for the BPF_PROG_ATTACH cmd. The default is still NONE, and the behavior of the BPF_F_ALLOW_OVERRIDE flag is unchanged.

The difference between the three possible flags for the BPF_PROG_ATTACH command:
- NONE (default): No further bpf programs allowed in the subtree.
- BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, the program in this cgroup yields to the sub-cgroup program.
- BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, that cgroup program gets run in addition to the program in this cgroup.

Most of the logic is in patch 1. Even when a cgroup doesn't have any programs attached, its set of effective programs can be non-empty. To quickly execute them and avoid penalizing cgroups without any effective programs, introduce 'struct bpf_prog_array', which has an optimization for cgroups with zero effective programs.

Patch 2 introduces the BPF_PROG_QUERY command for introspection.
Patch 3 makes the verifier more strict for cgroup-bpf program types.
Patch 4+ are tests.

More details in the individual patches.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b295edc54b
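Before the per-file diffs, a minimal userspace sketch (not part of the series itself) of how the new multi-attach and query interfaces fit together, using the bpf_prog_attach()/bpf_prog_query() wrappers that the tools/lib/bpf patch below adds. Here prog_fd is assumed to be a loaded BPF_PROG_TYPE_CGROUP_SKB program and cg_fd an open cgroup2 directory; error handling is trimmed:

/* hedged usage sketch, not from the diff */
#include <errno.h>
#include <stdio.h>
#include <linux/bpf.h>
#include "bpf.h"	/* tools/lib/bpf: bpf_prog_attach(), bpf_prog_query() */

static int attach_and_query(int prog_fd, int cg_fd)
{
	__u32 prog_ids[16], prog_cnt = 16, attach_flags;

	/* with BPF_F_ALLOW_MULTI, several programs may coexist on one cgroup */
	if (bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
			    BPF_F_ALLOW_MULTI))
		return -1;

	/* list effective programs; per the v2 note, the kernel fills the
	 * buffer and returns -ENOSPC if more than 16 are effective
	 */
	if (bpf_prog_query(cg_fd, BPF_CGROUP_INET_EGRESS,
			   BPF_F_QUERY_EFFECTIVE, &attach_flags,
			   prog_ids, &prog_cnt) && errno != ENOSPC)
		return -1;

	printf("%u effective programs\n", prog_cnt);
	return 0;
}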
include/linux/bpf-cgroup.h

@@ -14,27 +14,46 @@ struct bpf_sock_ops_kern;
 extern struct static_key_false cgroup_bpf_enabled_key;
 #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
 
+struct bpf_prog_list {
+	struct list_head node;
+	struct bpf_prog *prog;
+};
+
+struct bpf_prog_array;
+
 struct cgroup_bpf {
-	/*
-	 * Store two sets of bpf_prog pointers, one for programs that are
-	 * pinned directly to this cgroup, and one for those that are effective
-	 * when this cgroup is accessed.
-	 */
-	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
-	struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
-	bool disallow_override[MAX_BPF_ATTACH_TYPE];
+	/* array of effective progs in this cgroup */
+	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
+
+	/* attached progs to this cgroup and attach flags
+	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
+	 * have either zero or one element
+	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
+	 */
+	struct list_head progs[MAX_BPF_ATTACH_TYPE];
+	u32 flags[MAX_BPF_ATTACH_TYPE];
+
+	/* temp storage for effective prog array used by prog_attach/detach */
+	struct bpf_prog_array __rcu *inactive;
 };
 
 void cgroup_bpf_put(struct cgroup *cgrp);
-void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
-
-int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
-			struct bpf_prog *prog, enum bpf_attach_type type,
-			bool overridable);
+int cgroup_bpf_inherit(struct cgroup *cgrp);
 
-/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
-		      enum bpf_attach_type type, bool overridable);
+int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
+			enum bpf_attach_type type, u32 flags);
+int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
+			enum bpf_attach_type type, u32 flags);
+int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		       union bpf_attr __user *uattr);
+
+/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
+int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, u32 flags);
+int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, u32 flags);
+int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		     union bpf_attr __user *uattr);
 
 int __cgroup_bpf_run_filter_skb(struct sock *sk,
 				struct sk_buff *skb,
@@ -96,8 +115,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 
 struct cgroup_bpf {};
 static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
-static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
-				      struct cgroup *parent) {}
+static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
 
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
include/linux/bpf.h

@@ -241,6 +241,41 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
 
+/* an array of programs to be executed under rcu_lock.
+ *
+ * Typical usage:
+ * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ *
+ * the structure returned by bpf_prog_array_alloc() should be populated
+ * with program pointers and the last pointer must be NULL.
+ * The user has to keep refcnt on the program and make sure the program
+ * is removed from the array before bpf_prog_put().
+ * The 'struct bpf_prog_array *' should only be replaced with xchg()
+ * since other cpus are walking the array of pointers in parallel.
+ */
+struct bpf_prog_array {
+	struct rcu_head rcu;
+	struct bpf_prog *progs[0];
+};
+
+struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
+int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+				__u32 __user *prog_ids, u32 cnt);
+
+#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
+	({ \
+		struct bpf_prog **_prog; \
+		u32 _ret = 1; \
+		rcu_read_lock(); \
+		_prog = rcu_dereference(array)->progs; \
+		for (; *_prog; _prog++) \
+			_ret &= func(*_prog, ctx); \
+		rcu_read_unlock(); \
+		_ret; \
+	})
+
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
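BPF_PROG_RUN_ARRAY() above ANDs all return codes together, so a single 0 (drop) verdict from any program wins while every program still runs, matching the uapi note that all eligible programs execute regardless of earlier return codes. A hedged kernel-style sketch (illustrative only, not taken from the diff; "slot" is an assumed __rcu pointer being replaced) of the alloc/populate/xchg lifecycle the comment prescribes:

static int replace_progs(struct bpf_prog_array __rcu **slot,
			 struct bpf_prog *a, struct bpf_prog *b)
{
	struct bpf_prog_array __rcu *new, *old;

	new = bpf_prog_array_alloc(2, GFP_KERNEL); /* progs[2] stays NULL as sentinel */
	if (!new)
		return -ENOMEM;
	/* caller is assumed to hold refcnts on a and b */
	rcu_dereference_protected(new, 1)->progs[0] = a;
	rcu_dereference_protected(new, 1)->progs[1] = b;

	old = xchg(slot, new);	/* only xchg(): readers walk the array in parallel */
	bpf_prog_array_free(old);	/* kfree_rcu after a grace period */
	return 0;
}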
include/linux/filter.h

@@ -481,7 +481,7 @@ struct sk_filter {
 	struct bpf_prog	*prog;
 };
 
-#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
+#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
include/uapi/linux/bpf.h

@@ -92,6 +92,7 @@ enum bpf_cmd {
 	BPF_PROG_GET_FD_BY_ID,
 	BPF_MAP_GET_FD_BY_ID,
 	BPF_OBJ_GET_INFO_BY_FD,
+	BPF_PROG_QUERY,
 };
 
 enum bpf_map_type {
@@ -143,11 +144,47 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
-/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
- * to the given target_fd cgroup the descendent cgroup will be able to
- * override effective bpf program that was inherited from this cgroup
+/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
+ *
+ * NONE(default): No further bpf programs allowed in the subtree.
+ *
+ * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
+ * the program in this cgroup yields to sub-cgroup program.
+ *
+ * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
+ * that cgroup program gets run in addition to the program in this cgroup.
+ *
+ * Only one program is allowed to be attached to a cgroup with
+ * NONE or BPF_F_ALLOW_OVERRIDE flag.
+ * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
+ * release old program and attach the new one. Attach flags has to match.
+ *
+ * Multiple programs are allowed to be attached to a cgroup with
+ * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
+ * (those that were attached first, run first)
+ * The programs of sub-cgroup are executed first, then programs of
+ * this cgroup and then programs of parent cgroup.
+ * When children program makes decision (like picking TCP CA or sock bind)
+ * parent program has a chance to override it.
+ *
+ * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
+ * A cgroup with NONE doesn't allow any programs in sub-cgroups.
+ * Ex1:
+ * cgrp1 (MULTI progs A, B) ->
+ *    cgrp2 (OVERRIDE prog C) ->
+ *      cgrp3 (MULTI prog D) ->
+ *        cgrp4 (OVERRIDE prog E) ->
+ *          cgrp5 (NONE prog F)
+ * the event in cgrp5 triggers execution of F,D,A,B in that order.
+ * if prog F is detached, the execution is E,D,A,B
+ * if prog F and D are detached, the execution is E,A,B
+ * if prog F, E and D are detached, the execution is C,A,B
+ *
+ * All eligible programs are executed regardless of return code from
+ * earlier programs.
  */
 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+#define BPF_F_ALLOW_MULTI	(1U << 1)
 
 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
  * verifier will perform strict alignment checking as if the kernel
@@ -175,6 +212,9 @@ enum bpf_attach_type {
 /* Specify numa node during map creation */
 #define BPF_F_NUMA_NODE		(1U << 2)
 
+/* flags for BPF_PROG_QUERY */
+#define BPF_F_QUERY_EFFECTIVE	(1U << 0)
+
 #define BPF_OBJ_NAME_LEN 16U
 
 union bpf_attr {
@@ -253,6 +293,15 @@ union bpf_attr {
 		__u32		info_len;
 		__aligned_u64	info;
 	} info;
+
+	struct { /* anonymous struct used by BPF_PROG_QUERY command */
+		__u32		target_fd;	/* container object to query */
+		__u32		attach_type;
+		__u32		query_flags;
+		__u32		attach_flags;
+		__aligned_u64	prog_ids;
+		__u32		prog_cnt;
+	} query;
 } __attribute__((aligned(8)));
 
 /* BPF helper function descriptions:
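For completeness, a hedged sketch of driving BPF_PROG_QUERY at the raw syscall level with the query struct defined above (the libbpf wrapper added later in this merge does the same); cg_fd is assumed to be an open cgroup2 directory fd, and the function name is illustrative:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int query_egress_ids(int cg_fd, __u32 *ids, __u32 *cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd	= cg_fd;
	attr.query.attach_type	= BPF_CGROUP_INET_EGRESS;
	attr.query.query_flags	= BPF_F_QUERY_EFFECTIVE;
	attr.query.prog_ids	= (__u64)(unsigned long)ids;
	attr.query.prog_cnt	= *cnt;	/* in: buffer size; out: count found */

	if (syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr)))
		return -1;	/* on -ENOSPC the buffer is still filled */
	*cnt = attr.query.prog_cnt;
	return 0;
}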
kernel/bpf/cgroup.c

@@ -27,129 +27,407 @@ void cgroup_bpf_put(struct cgroup *cgrp)
 {
 	unsigned int type;
 
-	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.prog); type++) {
-		struct bpf_prog *prog = cgrp->bpf.prog[type];
-
-		if (prog) {
-			bpf_prog_put(prog);
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
+		struct list_head *progs = &cgrp->bpf.progs[type];
+		struct bpf_prog_list *pl, *tmp;
+
+		list_for_each_entry_safe(pl, tmp, progs, node) {
+			list_del(&pl->node);
+			bpf_prog_put(pl->prog);
+			kfree(pl);
 			static_branch_dec(&cgroup_bpf_enabled_key);
 		}
+		bpf_prog_array_free(cgrp->bpf.effective[type]);
 	}
 }
 
+/* count number of elements in the list.
+ * it's slow but the list cannot be long
+ */
+static u32 prog_list_length(struct list_head *head)
+{
+	struct bpf_prog_list *pl;
+	u32 cnt = 0;
+
+	list_for_each_entry(pl, head, node) {
+		if (!pl->prog)
+			continue;
+		cnt++;
+	}
+	return cnt;
+}
+
+/* if parent has non-overridable prog attached,
+ * disallow attaching new programs to the descendent cgroup.
+ * if parent has overridable or multi-prog, allow attaching
+ */
+static bool hierarchy_allows_attach(struct cgroup *cgrp,
+				    enum bpf_attach_type type,
+				    u32 new_flags)
+{
+	struct cgroup *p;
+
+	p = cgroup_parent(cgrp);
+	if (!p)
+		return true;
+	do {
+		u32 flags = p->bpf.flags[type];
+		u32 cnt;
+
+		if (flags & BPF_F_ALLOW_MULTI)
+			return true;
+		cnt = prog_list_length(&p->bpf.progs[type]);
+		WARN_ON_ONCE(cnt > 1);
+		if (cnt == 1)
+			return !!(flags & BPF_F_ALLOW_OVERRIDE);
+		p = cgroup_parent(p);
+	} while (p);
+	return true;
+}
+
+/* compute a chain of effective programs for a given cgroup:
+ * start from the list of programs in this cgroup and add
+ * all parent programs.
+ * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
+ * to programs in this cgroup
+ */
+static int compute_effective_progs(struct cgroup *cgrp,
+				   enum bpf_attach_type type,
+				   struct bpf_prog_array __rcu **array)
+{
+	struct bpf_prog_array __rcu *progs;
+	struct bpf_prog_list *pl;
+	struct cgroup *p = cgrp;
+	int cnt = 0;
+
+	/* count number of effective programs by walking parents */
+	do {
+		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+			cnt += prog_list_length(&p->bpf.progs[type]);
+		p = cgroup_parent(p);
+	} while (p);
+
+	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
+	if (!progs)
+		return -ENOMEM;
+
+	/* populate the array with effective progs */
+	cnt = 0;
+	p = cgrp;
+	do {
+		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+			list_for_each_entry(pl,
+					    &p->bpf.progs[type], node) {
+				if (!pl->prog)
+					continue;
+				rcu_dereference_protected(progs, 1)->
+					progs[cnt++] = pl->prog;
+			}
+		p = cgroup_parent(p);
+	} while (p);
+
+	*array = progs;
+	return 0;
+}
+
+static void activate_effective_progs(struct cgroup *cgrp,
+				     enum bpf_attach_type type,
+				     struct bpf_prog_array __rcu *array)
+{
+	struct bpf_prog_array __rcu *old_array;
+
+	old_array = xchg(&cgrp->bpf.effective[type], array);
+	/* free prog array after grace period, since __cgroup_bpf_run_*()
+	 * might be still walking the array
+	 */
+	bpf_prog_array_free(old_array);
+}
+
 /**
  * cgroup_bpf_inherit() - inherit effective programs from parent
  * @cgrp: the cgroup to modify
- * @parent: the parent to inherit from
  */
-void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
+int cgroup_bpf_inherit(struct cgroup *cgrp)
 {
-	unsigned int type;
+/* has to use marco instead of const int, since compiler thinks
+ * that array below is variable length
+ */
+#define	NR ARRAY_SIZE(cgrp->bpf.effective)
+	struct bpf_prog_array __rcu *arrays[NR] = {};
+	int i;
 
-	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.effective); type++) {
-		struct bpf_prog *e;
+	for (i = 0; i < NR; i++)
+		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
 
-		e = rcu_dereference_protected(parent->bpf.effective[type],
-					      lockdep_is_held(&cgroup_mutex));
-		rcu_assign_pointer(cgrp->bpf.effective[type], e);
-		cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
-	}
+	for (i = 0; i < NR; i++)
+		if (compute_effective_progs(cgrp, i, &arrays[i]))
+			goto cleanup;
+
+	for (i = 0; i < NR; i++)
+		activate_effective_progs(cgrp, i, arrays[i]);
+
+	return 0;
+cleanup:
+	for (i = 0; i < NR; i++)
+		bpf_prog_array_free(arrays[i]);
+	return -ENOMEM;
 }
 
+#define BPF_CGROUP_MAX_PROGS 64
+
 /**
- * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
+ * __cgroup_bpf_attach() - Attach the program to a cgroup, and
  *                         propagate the change to descendants
  * @cgrp: The cgroup which descendants to traverse
- * @parent: The parent of @cgrp, or %NULL if @cgrp is the root
- * @prog: A new program to pin
- * @type: Type of pinning operation (ingress/egress)
- *
- * Each cgroup has a set of two pointers for bpf programs; one for eBPF
- * programs it owns, and which is effective for execution.
- *
- * If @prog is not %NULL, this function attaches a new program to the cgroup
- * and releases the one that is currently attached, if any. @prog is then made
- * the effective program of type @type in that cgroup.
- *
- * If @prog is %NULL, the currently attached program of type @type is released,
- * and the effective program of the parent cgroup (if any) is inherited to
- * @cgrp.
- *
- * Then, the descendants of @cgrp are walked and the effective program for
- * each of them is set to the effective program of @cgrp unless the
- * descendant has its own program attached, in which case the subbranch is
- * skipped. This ensures that delegated subcgroups with own programs are left
- * untouched.
+ * @prog: A program to attach
+ * @type: Type of attach operation
  *
  * Must be called with cgroup_mutex held.
  */
-int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
-			struct bpf_prog *prog, enum bpf_attach_type type,
-			bool new_overridable)
+int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
+			enum bpf_attach_type type, u32 flags)
 {
-	struct bpf_prog *old_prog, *effective = NULL;
-	struct cgroup_subsys_state *pos;
-	bool overridable = true;
-
-	if (parent) {
-		overridable = !parent->bpf.disallow_override[type];
-		effective = rcu_dereference_protected(parent->bpf.effective[type],
-						      lockdep_is_held(&cgroup_mutex));
-	}
-
-	if (prog && effective && !overridable)
-		/* if parent has non-overridable prog attached, disallow
-		 * attaching new programs to descendent cgroup
-		 */
+	struct list_head *progs = &cgrp->bpf.progs[type];
+	struct bpf_prog *old_prog = NULL;
+	struct cgroup_subsys_state *css;
+	struct bpf_prog_list *pl;
+	bool pl_was_allocated;
+	u32 old_flags;
+	int err;
+
+	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
+		/* invalid combination */
+		return -EINVAL;
+
+	if (!hierarchy_allows_attach(cgrp, type, flags))
 		return -EPERM;
 
-	if (prog && effective && overridable != new_overridable)
-		/* if parent has overridable prog attached, only
-		 * allow overridable programs in descendent cgroup
+	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
+		/* Disallow attaching non-overridable on top
+		 * of existing overridable in this cgroup.
+		 * Disallow attaching multi-prog if overridable or none
 		 */
 		return -EPERM;
 
-	old_prog = cgrp->bpf.prog[type];
-
-	if (prog) {
-		overridable = new_overridable;
-		effective = prog;
-		if (old_prog &&
-		    cgrp->bpf.disallow_override[type] == new_overridable)
-			/* disallow attaching non-overridable on top
-			 * of existing overridable in this cgroup
-			 * and vice versa
-			 */
-			return -EPERM;
+	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
+		return -E2BIG;
+
+	if (flags & BPF_F_ALLOW_MULTI) {
+		list_for_each_entry(pl, progs, node)
+			if (pl->prog == prog)
+				/* disallow attaching the same prog twice */
+				return -EINVAL;
+
+		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+		if (!pl)
+			return -ENOMEM;
+		pl_was_allocated = true;
+		pl->prog = prog;
+		list_add_tail(&pl->node, progs);
+	} else {
+		if (list_empty(progs)) {
+			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+			if (!pl)
+				return -ENOMEM;
+			pl_was_allocated = true;
+			list_add_tail(&pl->node, progs);
+		} else {
+			pl = list_first_entry(progs, typeof(*pl), node);
+			old_prog = pl->prog;
+			pl_was_allocated = false;
+		}
+		pl->prog = prog;
 	}
 
-	if (!prog && !old_prog)
-		/* report error when trying to detach and nothing is attached */
-		return -ENOENT;
-
-	cgrp->bpf.prog[type] = prog;
-
-	css_for_each_descendant_pre(pos, &cgrp->self) {
-		struct cgroup *desc = container_of(pos, struct cgroup, self);
-
-		/* skip the subtree if the descendant has its own program */
-		if (desc->bpf.prog[type] && desc != cgrp) {
-			pos = css_rightmost_descendant(pos);
-		} else {
-			rcu_assign_pointer(desc->bpf.effective[type],
-					   effective);
-			desc->bpf.disallow_override[type] = !overridable;
-		}
+	old_flags = cgrp->bpf.flags[type];
+	cgrp->bpf.flags[type] = flags;
+
+	/* allocate and recompute effective prog arrays */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
+		if (err)
+			goto cleanup;
+	}
+
+	/* all allocations were successful. Activate all prog arrays */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		activate_effective_progs(desc, type, desc->bpf.inactive);
+		desc->bpf.inactive = NULL;
 	}
 
-	if (prog)
-		static_branch_inc(&cgroup_bpf_enabled_key);
-
+	static_branch_inc(&cgroup_bpf_enabled_key);
 	if (old_prog) {
 		bpf_prog_put(old_prog);
 		static_branch_dec(&cgroup_bpf_enabled_key);
 	}
 	return 0;
+
+cleanup:
+	/* oom while computing effective. Free all computed effective arrays
+	 * since they were not activated
+	 */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		bpf_prog_array_free(desc->bpf.inactive);
+		desc->bpf.inactive = NULL;
+	}
+
+	/* and cleanup the prog list */
+	pl->prog = old_prog;
+	if (pl_was_allocated) {
+		list_del(&pl->node);
+		kfree(pl);
+	}
+	return err;
 }
 
+/**
+ * __cgroup_bpf_detach() - Detach the program from a cgroup, and
+ *                         propagate the change to descendants
+ * @cgrp: The cgroup which descendants to traverse
+ * @prog: A program to detach or NULL
+ * @type: Type of detach operation
+ *
+ * Must be called with cgroup_mutex held.
+ */
+int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
+			enum bpf_attach_type type, u32 unused_flags)
+{
+	struct list_head *progs = &cgrp->bpf.progs[type];
+	u32 flags = cgrp->bpf.flags[type];
+	struct bpf_prog *old_prog = NULL;
+	struct cgroup_subsys_state *css;
+	struct bpf_prog_list *pl;
+	int err;
+
+	if (flags & BPF_F_ALLOW_MULTI) {
+		if (!prog)
+			/* to detach MULTI prog the user has to specify valid FD
+			 * of the program to be detached
+			 */
+			return -EINVAL;
+	} else {
+		if (list_empty(progs))
+			/* report error when trying to detach and nothing is attached */
+			return -ENOENT;
+	}
+
+	if (flags & BPF_F_ALLOW_MULTI) {
+		/* find the prog and detach it */
+		list_for_each_entry(pl, progs, node) {
+			if (pl->prog != prog)
+				continue;
+			old_prog = prog;
+			/* mark it deleted, so it's ignored while
+			 * recomputing effective
+			 */
+			pl->prog = NULL;
+			break;
+		}
+		if (!old_prog)
+			return -ENOENT;
+	} else {
+		/* to maintain backward compatibility NONE and OVERRIDE cgroups
+		 * allow detaching with invalid FD (prog==NULL)
+		 */
+		pl = list_first_entry(progs, typeof(*pl), node);
+		old_prog = pl->prog;
+		pl->prog = NULL;
+	}
+
+	/* allocate and recompute effective prog arrays */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
+		if (err)
+			goto cleanup;
+	}
+
+	/* all allocations were successful. Activate all prog arrays */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		activate_effective_progs(desc, type, desc->bpf.inactive);
+		desc->bpf.inactive = NULL;
+	}
+
+	/* now can actually delete it from this cgroup list */
+	list_del(&pl->node);
+	kfree(pl);
+	if (list_empty(progs))
+		/* last program was detached, reset flags to zero */
+		cgrp->bpf.flags[type] = 0;
+
+	bpf_prog_put(old_prog);
+	static_branch_dec(&cgroup_bpf_enabled_key);
+	return 0;
+
+cleanup:
+	/* oom while computing effective. Free all computed effective arrays
+	 * since they were not activated
+	 */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		bpf_prog_array_free(desc->bpf.inactive);
+		desc->bpf.inactive = NULL;
+	}
+
+	/* and restore back old_prog */
+	pl->prog = old_prog;
+	return err;
+}
+
+/* Must be called with cgroup_mutex held to avoid races. */
+int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		       union bpf_attr __user *uattr)
+{
+	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+	enum bpf_attach_type type = attr->query.attach_type;
+	struct list_head *progs = &cgrp->bpf.progs[type];
+	u32 flags = cgrp->bpf.flags[type];
+	int cnt, ret = 0, i;
+
+	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
+		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
+	else
+		cnt = prog_list_length(progs);
+
+	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+		return -EFAULT;
+	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
+		return -EFAULT;
+	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
+		/* return early if user requested only program count + flags */
+		return 0;
+	if (attr->query.prog_cnt < cnt) {
+		cnt = attr->query.prog_cnt;
+		ret = -ENOSPC;
+	}
+
+	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
+		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
+						   prog_ids, cnt);
+	} else {
+		struct bpf_prog_list *pl;
+		u32 id;
+
+		i = 0;
+		list_for_each_entry(pl, progs, node) {
+			id = pl->prog->aux->id;
+			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
+				return -EFAULT;
+			if (++i == cnt)
+				break;
+		}
+	}
+	return ret;
+}
+
 /**
@@ -171,36 +449,26 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 				struct sk_buff *skb,
 				enum bpf_attach_type type)
 {
-	struct bpf_prog *prog;
+	unsigned int offset = skb->data - skb_network_header(skb);
+	struct sock *save_sk;
 	struct cgroup *cgrp;
-	int ret = 0;
+	int ret;
 
 	if (!sk || !sk_fullsock(sk))
 		return 0;
 
-	if (sk->sk_family != AF_INET &&
-	    sk->sk_family != AF_INET6)
+	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
 		return 0;
 
 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-
-	rcu_read_lock();
-
-	prog = rcu_dereference(cgrp->bpf.effective[type]);
-	if (prog) {
-		unsigned int offset = skb->data - skb_network_header(skb);
-		struct sock *save_sk = skb->sk;
-
-		skb->sk = sk;
-		__skb_push(skb, offset);
-		ret = bpf_prog_run_save_cb(prog, skb) == 1 ? 0 : -EPERM;
-		__skb_pull(skb, offset);
-		skb->sk = save_sk;
-	}
-
-	rcu_read_unlock();
-
-	return ret;
+	save_sk = skb->sk;
+	skb->sk = sk;
+	__skb_push(skb, offset);
+	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
+				 bpf_prog_run_save_cb);
+	__skb_pull(skb, offset);
+	skb->sk = save_sk;
+	return ret == 1 ? 0 : -EPERM;
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
 
@@ -221,19 +489,10 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
 			       enum bpf_attach_type type)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	struct bpf_prog *prog;
-	int ret = 0;
-
-
-	rcu_read_lock();
-
-	prog = rcu_dereference(cgrp->bpf.effective[type]);
-	if (prog)
-		ret = BPF_PROG_RUN(prog, sk) == 1 ? 0 : -EPERM;
-
-	rcu_read_unlock();
+	int ret;
 
-	return ret;
+	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
+	return ret == 1 ? 0 : -EPERM;
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
 
@@ -258,18 +517,10 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 				     enum bpf_attach_type type)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	struct bpf_prog *prog;
-	int ret = 0;
-
-
-	rcu_read_lock();
-
-	prog = rcu_dereference(cgrp->bpf.effective[type]);
-	if (prog)
-		ret = BPF_PROG_RUN(prog, sock_ops) == 1 ? 0 : -EPERM;
-
-	rcu_read_unlock();
+	int ret;
 
-	return ret;
+	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
+				 BPF_PROG_RUN);
+	return ret == 1 ? 0 : -EPERM;
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
kernel/bpf/core.c

@@ -1381,6 +1381,75 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
+/* to avoid allocating empty bpf_prog_array for cgroups that
+ * don't have bpf program attached use one global 'empty_prog_array'
+ * It will not be modified the caller of bpf_prog_array_alloc()
+ * (since caller requested prog_cnt == 0)
+ * that pointer should be 'freed' by bpf_prog_array_free()
+ */
+static struct {
+	struct bpf_prog_array hdr;
+	struct bpf_prog *null_prog;
+} empty_prog_array = {
+	.null_prog = NULL,
+};
+
+struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
+{
+	if (prog_cnt)
+		return kzalloc(sizeof(struct bpf_prog_array) +
+			       sizeof(struct bpf_prog *) * (prog_cnt + 1),
+			       flags);
+
+	return &empty_prog_array.hdr;
+}
+
+void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
+{
+	if (!progs ||
+	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
+		return;
+	kfree_rcu(progs, rcu);
+}
+
+int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
+{
+	struct bpf_prog **prog;
+	u32 cnt = 0;
+
+	rcu_read_lock();
+	prog = rcu_dereference(progs)->progs;
+	for (; *prog; prog++)
+		cnt++;
+	rcu_read_unlock();
+	return cnt;
+}
+
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+				__u32 __user *prog_ids, u32 cnt)
+{
+	struct bpf_prog **prog;
+	u32 i = 0, id;
+
+	rcu_read_lock();
+	prog = rcu_dereference(progs)->progs;
+	for (; *prog; prog++) {
+		id = (*prog)->aux->id;
+		if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
+			rcu_read_unlock();
+			return -EFAULT;
+		}
+		if (++i == cnt) {
+			prog++;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	if (*prog)
+		return -ENOSPC;
+	return 0;
+}
+
 static void bpf_prog_free_deferred(struct work_struct *work)
 {
 	struct bpf_prog_aux *aux;
kernel/bpf/syscall.c

@@ -1168,6 +1168,9 @@ static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
 	return 0;
 }
 
+#define BPF_F_ATTACH_MASK \
+	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
+
 static int bpf_prog_attach(const union bpf_attr *attr)
 {
 	enum bpf_prog_type ptype;
@@ -1181,7 +1184,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 	if (CHECK_ATTR(BPF_PROG_ATTACH))
 		return -EINVAL;
 
-	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
 		return -EINVAL;
 
 	switch (attr->attach_type) {
@@ -1212,8 +1215,8 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 		return PTR_ERR(cgrp);
 	}
 
-	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
-				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+				attr->attach_flags);
 	if (ret)
 		bpf_prog_put(prog);
 	cgroup_put(cgrp);
@@ -1225,6 +1228,8 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
+	enum bpf_prog_type ptype;
+	struct bpf_prog *prog;
 	struct cgroup *cgrp;
 	int ret;
 
@@ -1237,26 +1242,67 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 	switch (attr->attach_type) {
 	case BPF_CGROUP_INET_INGRESS:
 	case BPF_CGROUP_INET_EGRESS:
+		ptype = BPF_PROG_TYPE_CGROUP_SKB;
+		break;
 	case BPF_CGROUP_INET_SOCK_CREATE:
+		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
+		break;
 	case BPF_CGROUP_SOCK_OPS:
-		cgrp = cgroup_get_from_fd(attr->target_fd);
-		if (IS_ERR(cgrp))
-			return PTR_ERR(cgrp);
-
-		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
-		cgroup_put(cgrp);
+		ptype = BPF_PROG_TYPE_SOCK_OPS;
 		break;
 	case BPF_SK_SKB_STREAM_PARSER:
 	case BPF_SK_SKB_STREAM_VERDICT:
-		ret = sockmap_get_from_fd(attr, false);
-		break;
+		return sockmap_get_from_fd(attr, false);
 	default:
 		return -EINVAL;
 	}
 
+	cgrp = cgroup_get_from_fd(attr->target_fd);
+	if (IS_ERR(cgrp))
+		return PTR_ERR(cgrp);
+
+	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+	if (IS_ERR(prog))
+		prog = NULL;
+
+	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+	if (prog)
+		bpf_prog_put(prog);
+	cgroup_put(cgrp);
 	return ret;
 }
 
+#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
+
+static int bpf_prog_query(const union bpf_attr *attr,
+			  union bpf_attr __user *uattr)
+{
+	struct cgroup *cgrp;
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (CHECK_ATTR(BPF_PROG_QUERY))
+		return -EINVAL;
+	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
+		return -EINVAL;
+
+	switch (attr->query.attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_SOCK_OPS:
+		break;
+	default:
+		return -EINVAL;
+	}
+	cgrp = cgroup_get_from_fd(attr->query.target_fd);
+	if (IS_ERR(cgrp))
+		return PTR_ERR(cgrp);
+	ret = cgroup_bpf_query(cgrp, attr, uattr);
+	cgroup_put(cgrp);
+	return ret;
+}
 #endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
@@ -1553,6 +1599,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	case BPF_PROG_DETACH:
 		err = bpf_prog_detach(&attr);
 		break;
+	case BPF_PROG_QUERY:
+		err = bpf_prog_query(&attr, uattr);
+		break;
 #endif
 	case BPF_PROG_TEST_RUN:
 		err = bpf_prog_test_run(&attr, uattr);
kernel/bpf/verifier.c

@@ -3073,6 +3073,43 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	return 0;
 }
 
+static int check_return_code(struct bpf_verifier_env *env)
+{
+	struct bpf_reg_state *reg;
+	struct tnum range = tnum_range(0, 1);
+
+	switch (env->prog->type) {
+	case BPF_PROG_TYPE_CGROUP_SKB:
+	case BPF_PROG_TYPE_CGROUP_SOCK:
+	case BPF_PROG_TYPE_SOCK_OPS:
+		break;
+	default:
+		return 0;
+	}
+
+	reg = &env->cur_state.regs[BPF_REG_0];
+	if (reg->type != SCALAR_VALUE) {
+		verbose("At program exit the register R0 is not a known value (%s)\n",
+			reg_type_str[reg->type]);
+		return -EINVAL;
+	}
+
+	if (!tnum_in(range, reg->var_off)) {
+		verbose("At program exit the register R0 ");
+		if (!tnum_is_unknown(reg->var_off)) {
+			char tn_buf[48];
+
+			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+			verbose("has value %s", tn_buf);
+		} else {
+			verbose("has unknown scalar value");
+		}
+		verbose(" should have been 0 or 1\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* non-recursive DFS pseudo code
  * 1  procedure DFS-iterative(G,v):
  *    2      label v as discovered
@@ -3863,6 +3900,9 @@ static int do_check(struct bpf_verifier_env *env)
 				return -EACCES;
 			}
 
+			err = check_return_code(env);
+			if (err)
+				return err;
 process_bpf_exit:
 			insn_idx = pop_stack(env, &prev_insn_idx);
 			if (insn_idx < 0) {
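check_return_code() above means a cgroup-attached program must provably return 0 or 1. A small illustrative instruction sequence (mirroring test2 of the verifier selftests at the end of this merge): masking R0 with 1 bounds its tnum to (0x0; 0x1), which falls inside tnum_range(0, 1), so the verifier accepts the exit:

/* illustrative only: r0 = *(u32 *)ctx could be anything, but "r0 &= 1"
 * leaves a scalar provably in [0, 1], satisfying check_return_code()
 */
struct bpf_insn ret_zero_or_one[] = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
	BPF_EXIT_INSN(),
};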
kernel/cgroup/cgroup.c

@@ -1896,6 +1896,9 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
 	if (ret)
 		goto destroy_root;
 
+	ret = cgroup_bpf_inherit(root_cgrp);
+	WARN_ON_ONCE(ret);
+
 	trace_cgroup_setup_root(root);
 
 	/*
@@ -4713,6 +4716,9 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	cgrp->self.parent = &parent->self;
 	cgrp->root = root;
 	cgrp->level = level;
+	ret = cgroup_bpf_inherit(cgrp);
+	if (ret)
+		goto out_idr_free;
 
 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
 		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
@@ -4747,13 +4753,12 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	if (!cgroup_on_dfl(cgrp))
 		cgrp->subtree_control = cgroup_control(cgrp);
 
-	if (parent)
-		cgroup_bpf_inherit(cgrp, parent);
-
 	cgroup_propagate_control(cgrp);
 
 	return cgrp;
 
+out_idr_free:
+	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_cancel_ref:
 	percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
@@ -5736,14 +5741,33 @@ void cgroup_sk_free(struct sock_cgroup_data *skcd)
 #endif	/* CONFIG_SOCK_CGROUP_DATA */
 
 #ifdef CONFIG_CGROUP_BPF
-int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
-		      enum bpf_attach_type type, bool overridable)
+int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, u32 flags)
+{
+	int ret;
+
+	mutex_lock(&cgroup_mutex);
+	ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
+	mutex_unlock(&cgroup_mutex);
+	return ret;
+}
+int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, u32 flags)
+{
+	int ret;
+
+	mutex_lock(&cgroup_mutex);
+	ret = __cgroup_bpf_detach(cgrp, prog, type, flags);
+	mutex_unlock(&cgroup_mutex);
+	return ret;
+}
+int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		     union bpf_attr __user *uattr)
 {
-	struct cgroup *parent = cgroup_parent(cgrp);
 	int ret;
 
 	mutex_lock(&cgroup_mutex);
-	ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
+	ret = __cgroup_bpf_query(cgrp, attr, uattr);
 	mutex_unlock(&cgroup_mutex);
 	return ret;
 }
cgroup_helpers.c

@@ -56,7 +56,7 @@ int setup_cgroup_environment(void)
 		return 1;
 	}
 
-	if (mount("none", CGROUP_MOUNT_PATH, "cgroup2", 0, NULL)) {
+	if (mount("none", CGROUP_MOUNT_PATH, "cgroup2", 0, NULL) && errno != EBUSY) {
 		log_err("mount cgroup2");
 		return 1;
 	}
@@ -163,7 +163,7 @@ int create_and_get_cgroup(char *path)
 
 	format_cgroup_path(cgroup_path, path);
 	if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
-		log_err("mkdiring cgroup");
+		log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
 		return 0;
 	}
samples/bpf/test_cgrp2_attach2.c

@@ -30,7 +30,7 @@
 
 #define FOO		"/foo"
 #define BAR		"/foo/bar/"
-#define PING_CMD	"ping -c1 -w1 127.0.0.1"
+#define PING_CMD	"ping -c1 -w1 127.0.0.1 > /dev/null"
 
 char bpf_log_buf[BPF_LOG_BUF_SIZE];
 
@@ -55,8 +55,7 @@ static int prog_load(int verdict)
 	return ret;
 }
 
-
-int main(int argc, char **argv)
+static int test_foo_bar(void)
 {
 	int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0;
 
@@ -189,8 +188,223 @@ out:
 	close(bar);
 	cleanup_cgroup_environment();
 	if (!rc)
-		printf("PASS\n");
+		printf("### override:PASS\n");
 	else
-		printf("FAIL\n");
+		printf("### override:FAIL\n");
 	return rc;
 }
+
+static int map_fd = -1;
+
+static int prog_load_cnt(int verdict, int val)
+{
+	if (map_fd < 0)
+		map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
+	if (map_fd < 0) {
+		printf("failed to create map '%s'\n", strerror(errno));
+		return -1;
+	}
+
+	struct bpf_insn prog[] = {
+		BPF_MOV32_IMM(BPF_REG_0, 0),
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+		BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
+		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
+		BPF_EXIT_INSN(),
+	};
+	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+	int ret;
+
+	ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+			       prog, insns_cnt, "GPL", 0,
+			       bpf_log_buf, BPF_LOG_BUF_SIZE);
+
+	if (ret < 0) {
+		log_err("Loading program");
+		printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
+		return 0;
+	}
+	return ret;
+}
+
+
+static int test_multiprog(void)
+{
+	__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
+	int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
+	int drop_prog, allow_prog[6] = {}, rc = 0;
+	unsigned long long value;
+	int i = 0;
+
+	for (i = 0; i < 6; i++) {
+		allow_prog[i] = prog_load_cnt(1, 1 << i);
+		if (!allow_prog[i])
+			goto err;
+	}
+	drop_prog = prog_load_cnt(0, 1);
+	if (!drop_prog)
+		goto err;
+
+	if (setup_cgroup_environment())
+		goto err;
+
+	cg1 = create_and_get_cgroup("/cg1");
+	if (!cg1)
+		goto err;
+	cg2 = create_and_get_cgroup("/cg1/cg2");
+	if (!cg2)
+		goto err;
+	cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
+	if (!cg3)
+		goto err;
+	cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
+	if (!cg4)
+		goto err;
+	cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
+	if (!cg5)
+		goto err;
+
+	if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
+		goto err;
+
+	if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+		log_err("Attaching prog to cg1");
+		goto err;
+	}
+	if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+		log_err("Unexpected success attaching the same prog to cg1");
+		goto err;
+	}
+	if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+		log_err("Attaching prog2 to cg1");
+		goto err;
+	}
+	if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, 1)) {
+		log_err("Attaching prog to cg2");
+		goto err;
+	}
+	if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, 2)) {
+		log_err("Attaching prog to cg3");
+		goto err;
+	}
+	if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, 1)) {
+		log_err("Attaching prog to cg4");
+		goto err;
+	}
+	if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) {
+		log_err("Attaching prog to cg5");
+		goto err;
+	}
+	assert(system(PING_CMD) == 0);
+	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
+	assert(value == 1 + 2 + 8 + 32);
+
+	/* query the number of effective progs in cg5 */
+	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
+			      NULL, NULL, &prog_cnt) == 0);
+	assert(prog_cnt == 4);
+	/* retrieve prog_ids of effective progs in cg5 */
+	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
+			      &attach_flags, prog_ids, &prog_cnt) == 0);
+	assert(prog_cnt == 4);
+	assert(attach_flags == 0);
+	saved_prog_id = prog_ids[0];
+	/* check enospc handling */
+	prog_ids[0] = 0;
+	prog_cnt = 2;
+	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
+			      &attach_flags, prog_ids, &prog_cnt) == -1 &&
+	       errno == ENOSPC);
+	assert(prog_cnt == 4);
+	/* check that prog_ids are returned even when buffer is too small */
+	assert(prog_ids[0] == saved_prog_id);
+	/* retrieve prog_id of single attached prog in cg5 */
+	prog_ids[0] = 0;
+	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
+			      NULL, prog_ids, &prog_cnt) == 0);
+	assert(prog_cnt == 1);
+	assert(prog_ids[0] == saved_prog_id);
+
+	/* detach bottom program and ping again */
+	if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) {
+		log_err("Detaching prog from cg5");
+		goto err;
+	}
+	value = 0;
+	assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
+	assert(system(PING_CMD) == 0);
+	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
+	assert(value == 1 + 2 + 8 + 16);
+
+	/* detach 3rd from bottom program and ping again */
+	errno = 0;
+	if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) {
+		log_err("Unexpected success on detach from cg3");
+		goto err;
+	}
+	if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) {
+		log_err("Detaching from cg3");
+		goto err;
+	}
+	value = 0;
+	assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
+	assert(system(PING_CMD) == 0);
+	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
+	assert(value == 1 + 2 + 16);
+
+	/* detach 2nd from bottom program and ping again */
+	if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) {
+		log_err("Detaching prog from cg4");
+		goto err;
+	}
+	value = 0;
+	assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
+	assert(system(PING_CMD) == 0);
+	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
+	assert(value == 1 + 2 + 4);
+
+	prog_cnt = 4;
+	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
+			      &attach_flags, prog_ids, &prog_cnt) == 0);
+	assert(prog_cnt == 3);
+	assert(attach_flags == 0);
+	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
+			      NULL, prog_ids, &prog_cnt) == 0);
+	assert(prog_cnt == 0);
+	goto out;
+err:
+	rc = 1;
+
+out:
+	for (i = 0; i < 6; i++)
+		if (allow_prog[i] > 0)
+			close(allow_prog[i]);
+	close(cg1);
+	close(cg2);
+	close(cg3);
+	close(cg4);
+	close(cg5);
+	cleanup_cgroup_environment();
+	if (!rc)
+		printf("### multi:PASS\n");
+	else
+		printf("### multi:FAIL\n");
+	return rc;
+}
+
+int main(int argc, char **argv)
+{
+	int rc = 0;
+
+	rc = test_foo_bar();
+	if (rc)
+		return rc;
+
+	return test_multiprog();
+}
tools/include/uapi/linux/bpf.h

@@ -92,6 +92,7 @@ enum bpf_cmd {
 	BPF_PROG_GET_FD_BY_ID,
 	BPF_MAP_GET_FD_BY_ID,
 	BPF_OBJ_GET_INFO_BY_FD,
+	BPF_PROG_QUERY,
 };
 
 enum bpf_map_type {
@@ -143,11 +144,47 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
-/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
- * to the given target_fd cgroup the descendent cgroup will be able to
- * override effective bpf program that was inherited from this cgroup
+/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
+ *
+ * NONE(default): No further bpf programs allowed in the subtree.
+ *
+ * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
+ * the program in this cgroup yields to sub-cgroup program.
+ *
+ * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
+ * that cgroup program gets run in addition to the program in this cgroup.
+ *
+ * Only one program is allowed to be attached to a cgroup with
+ * NONE or BPF_F_ALLOW_OVERRIDE flag.
+ * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
+ * release old program and attach the new one. Attach flags has to match.
+ *
+ * Multiple programs are allowed to be attached to a cgroup with
+ * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
+ * (those that were attached first, run first)
+ * The programs of sub-cgroup are executed first, then programs of
+ * this cgroup and then programs of parent cgroup.
+ * When children program makes decision (like picking TCP CA or sock bind)
+ * parent program has a chance to override it.
+ *
+ * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
+ * A cgroup with NONE doesn't allow any programs in sub-cgroups.
+ * Ex1:
+ * cgrp1 (MULTI progs A, B) ->
+ *    cgrp2 (OVERRIDE prog C) ->
+ *      cgrp3 (MULTI prog D) ->
+ *        cgrp4 (OVERRIDE prog E) ->
+ *          cgrp5 (NONE prog F)
+ * the event in cgrp5 triggers execution of F,D,A,B in that order.
+ * if prog F is detached, the execution is E,D,A,B
+ * if prog F and D are detached, the execution is E,A,B
+ * if prog F, E and D are detached, the execution is C,A,B
+ *
+ * All eligible programs are executed regardless of return code from
+ * earlier programs.
  */
 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+#define BPF_F_ALLOW_MULTI	(1U << 1)
 
 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
  * verifier will perform strict alignment checking as if the kernel
@@ -175,6 +212,9 @@ enum bpf_attach_type {
 /* Specify numa node during map creation */
 #define BPF_F_NUMA_NODE		(1U << 2)
 
+/* flags for BPF_PROG_QUERY */
+#define BPF_F_QUERY_EFFECTIVE	(1U << 0)
+
 #define BPF_OBJ_NAME_LEN 16U
 
 union bpf_attr {
@@ -253,6 +293,15 @@ union bpf_attr {
 		__u32		info_len;
 		__aligned_u64	info;
 	} info;
+
+	struct { /* anonymous struct used by BPF_PROG_QUERY command */
+		__u32		target_fd;	/* container object to query */
+		__u32		attach_type;
+		__u32		query_flags;
+		__u32		attach_flags;
+		__aligned_u64	prog_ids;
+		__u32		prog_cnt;
+	} query;
 } __attribute__((aligned(8)));
 
 /* BPF helper function descriptions:
tools/lib/bpf/bpf.c

@@ -291,6 +291,38 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
 	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
 }
 
+int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
+{
+	union bpf_attr attr;
+
+	bzero(&attr, sizeof(attr));
+	attr.target_fd	 = target_fd;
+	attr.attach_bpf_fd = prog_fd;
+	attr.attach_type = type;
+
+	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+}
+
+int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
+		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
+{
+	union bpf_attr attr;
+	int ret;
+
+	bzero(&attr, sizeof(attr));
+	attr.query.target_fd	= target_fd;
+	attr.query.attach_type	= type;
+	attr.query.query_flags	= query_flags;
+	attr.query.prog_cnt	= *prog_cnt;
+	attr.query.prog_ids	= ptr_to_u64(prog_ids);
+
+	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
+	if (attach_flags)
+		*attach_flags = attr.query.attach_flags;
+	*prog_cnt = attr.query.prog_cnt;
+	return ret;
+}
+
 int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
 		      void *data_out, __u32 *size_out, __u32 *retval,
 		      __u32 *duration)
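A hedged usage note (not from the diff): bpf_prog_detach2() added above covers both detach modes. With BPF_F_ALLOW_MULTI the exact program must be named by fd; NONE/OVERRIDE cgroups keep the legacy behavior where an invalid fd detaches whatever is attached. cg_fd and prog_fd are assumed fds:

static int detach_examples(int prog_fd, int cg_fd)
{
	/* MULTI cgroup: must pass the fd of the specific program to remove */
	if (bpf_prog_detach2(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS))
		return -1;
	/* NONE/OVERRIDE cgroup: -1 keeps the old "detach whatever is
	 * attached" semantics
	 */
	return bpf_prog_detach2(-1, cg_fd, BPF_CGROUP_INET_EGRESS);
}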
tools/lib/bpf/bpf.h

@@ -66,6 +66,7 @@ int bpf_obj_get(const char *pathname);
 int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
 		    unsigned int flags);
 int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type);
 int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
 		      void *data_out, __u32 *size_out, __u32 *retval,
 		      __u32 *duration);
@@ -74,5 +75,6 @@ int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
 int bpf_prog_get_fd_by_id(__u32 id);
 int bpf_map_get_fd_by_id(__u32 id);
 int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len);
-
+int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
+		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt);
 #endif
tools/testing/selftests/bpf/test_verifier.c

@@ -6892,6 +6892,78 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
+	{
+		"bpf_exit with invalid return code. test1",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R0 has value (0x0; 0xffffffff)",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+	},
+	{
+		"bpf_exit with invalid return code. test2",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+	},
+	{
+		"bpf_exit with invalid return code. test3",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R0 has value (0x0; 0x3)",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+	},
+	{
+		"bpf_exit with invalid return code. test4",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+	},
+	{
+		"bpf_exit with invalid return code. test5",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R0 has value (0x2; 0x0)",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+	},
+	{
+		"bpf_exit with invalid return code. test6",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R0 is not a known value (ctx)",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+	},
+	{
+		"bpf_exit with invalid return code. test7",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
+			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R0 has unknown scalar value",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)