Daniel Borkmann says:

====================
pull-request: bpf-next 2022-07-09

We've added 94 non-merge commits during the last 19 day(s) which contain
a total of 125 files changed, 5141 insertions(+), 6701 deletions(-).

The main changes are:

1) Add new way for performing BTF type queries to BPF, from Daniel Müller.

2) Add inlining of calls to bpf_loop() helper when its function callback is
   statically known, from Eduard Zingerman.

3) Implement BPF TCP CC framework usability improvements, from Jörn-Thorben Hinz.

4) Add LSM flavor for attaching per-cgroup BPF programs to existing LSM hooks,
   from Stanislav Fomichev.

5) Remove all deprecated libbpf APIs in prep for 1.0 release, from Andrii Nakryiko.

6) Add benchmarks around local_storage to BPF selftests, from Dave Marchevsky.

7) AF_XDP sample removal (given move to libxdp) and various improvements around
   AF_XDP selftests, from Magnus Karlsson & Maciej Fijalkowski.

8) Add bpftool improvements for memcg probing and bash completion, from Quentin Monnet.

9) Add arm64 JIT support for BPF-2-BPF coupled with tail calls, from Jakub Sitnicki.

10) Sockmap optimizations around throughput of UDP transmissions which have been
    improved by 61%, from Cong Wang.

11) Rework perf's BPF prologue code to remove deprecated functions, from Jiri Olsa.

12) Fix sockmap teardown path to avoid sleepable sk_psock_stop, from John Fastabend.

13) Fix libbpf's cleanup around legacy kprobe/uprobe on error case, from Chuang Wang.

14) Fix libbpf's bpf_helpers.h to work with gcc for the case of its sec/pragma
    macro, from James Hilliard.

15) Fix libbpf's pt_regs macros for riscv to use a0 for RC register, from Yixun Lan.

16) Fix bpftool to show the name of type BPF_OBJ_LINK, from Yafang Shao.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (94 commits)
  selftests/bpf: Fix xdp_synproxy build failure if CONFIG_NF_CONNTRACK=m/n
  bpf: Correctly propagate errors up from bpf_core_composites_match
  libbpf: Disable SEC pragma macro on GCC
  bpf: Check attach_func_proto more carefully in check_return_code
  selftests/bpf: Add test involving restrict type qualifier
  bpftool: Add support for KIND_RESTRICT to gen min_core_btf command
  MAINTAINERS: Add entry for AF_XDP selftests files
  selftests, xsk: Rename AF_XDP testing app
  bpf, docs: Remove deprecated xsk libbpf APIs description
  selftests/bpf: Add benchmark for local_storage RCU Tasks Trace usage
  libbpf, riscv: Use a0 for RC register
  libbpf: Remove unnecessary usdt_rel_ip assignments
  selftests/bpf: Fix few more compiler warnings
  selftests/bpf: Fix bogus uninitialized variable warning
  bpftool: Remove zlib feature test from Makefile
  libbpf: Cleanup the legacy uprobe_event on failed add/attach_event()
  libbpf: Fix wrong variable used in perf_event_uprobe_open_legacy()
  libbpf: Cleanup the legacy kprobe_event on failed add/attach_event()
  selftests/bpf: Add type match test against kernel's task_struct
  selftests/bpf: Add nested type to type based tests
  ...
====================

Link: https://lore.kernel.org/r/20220708233145.32365-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
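As an illustration of item 4), a per-cgroup LSM program is written like a regular
BPF LSM program but attached with the new BPF_LSM_CGROUP attach type. A minimal
sketch follows (not taken from this series' selftests; the SEC("lsm_cgroup/...")
name is the libbpf convention added alongside this set, and the hook choice is
illustrative):

    // SPDX-License-Identifier: GPL-2.0
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("lsm_cgroup/socket_bind")
    int BPF_PROG(allow_bind, struct socket *sock, struct sockaddr *address,
                 int addrlen)
    {
            /* For BPF_LSM_CGROUP, returning 1 allows the operation and 0
             * rejects it; only tasks in the attached cgroup are affected. */
            return 1;
    }

    char _license[] SEC("license") = "GPL";

Such a program is attached with bpf_prog_attach(prog_fd, cgroup_fd,
BPF_LSM_CGROUP, 0) or through BPF_LINK_CREATE, as wired up in the syscall
changes in the diff below.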
@@ -351,7 +351,7 @@ These instructions have seven implicit operands:
 * Register R0 is an implicit output which contains the data fetched from
   the packet.
 * Registers R1-R5 are scratch registers that are clobbered after a call to
-  ``BPF_ABS | BPF_LD`` or ``BPF_IND`` | BPF_LD instructions.
+  ``BPF_ABS | BPF_LD`` or ``BPF_IND | BPF_LD`` instructions.
 
 These instructions have an implicit program exit condition as well. When an
 eBPF program is trying to access the data beyond the packet boundary, the
@@ -9,8 +9,8 @@ described here. It's recommended to follow these conventions whenever a
 new function or type is added to keep libbpf API clean and consistent.
 
 All types and functions provided by libbpf API should have one of the
-following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``,
-``btf_dump_``, ``ring_buffer_``, ``perf_buffer_``.
+following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``btf_dump_``,
+``ring_buffer_``, ``perf_buffer_``.
 
 System call wrappers
 --------------------
@@ -59,15 +59,6 @@ Auxiliary functions and types that don't fit well in any of categories
 described above should have ``libbpf_`` prefix, e.g.
 ``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
 
-AF_XDP functions
--------------------
-
-AF_XDP functions should have an ``xsk_`` prefix, e.g.
-``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists
-of both low-level ring access functions and high-level configuration
-functions. These can be mixed and matched. Note that these functions
-are not reentrant for performance reasons.
-
 ABI
 ---
 
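For illustration, the prefix rules above map onto real libbpf calls as in the
following minimal sketch, where prog.bpf.o is a placeholder object file:

    #include <bpf/libbpf.h>

    int main(void)
    {
            /* bpf_ prefix: object "class" plus double-underscore "method" */
            struct bpf_object *obj = bpf_object__open("prog.bpf.o");

            /* libbpf_ prefix: auxiliary helper */
            if (libbpf_get_error(obj))
                    return 1;

            bpf_object__close(obj);
            return 0;
    }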
@@ -21917,8 +21917,7 @@ F: include/uapi/linux/if_xdp.h
 F: include/uapi/linux/xdp_diag.h
 F: include/net/netns/xdp.h
 F: net/xdp/
-F: samples/bpf/xdpsock*
-F: tools/lib/bpf/xsk*
+F: tools/testing/selftests/bpf/*xsk*
 
 XEN BLOCK SUBSYSTEM
 M: Roger Pau Monné <roger.pau@citrix.com>
@@ -246,6 +246,7 @@ static bool is_lsi_offset(int offset, int scale)
 static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 {
         const struct bpf_prog *prog = ctx->prog;
+        const bool is_main_prog = prog->aux->func_idx == 0;
         const u8 r6 = bpf2a64[BPF_REG_6];
         const u8 r7 = bpf2a64[BPF_REG_7];
         const u8 r8 = bpf2a64[BPF_REG_8];
@@ -299,7 +300,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
         /* Set up BPF prog stack base register */
         emit(A64_MOV(1, fp, A64_SP), ctx);
 
-        if (!ebpf_from_cbpf) {
+        if (!ebpf_from_cbpf && is_main_prog) {
                 /* Initialize tail_call_cnt */
                 emit(A64_MOVZ(1, tcc, 0, 0), ctx);
 
@@ -1530,3 +1531,9 @@ void bpf_jit_free_exec(void *addr)
 {
         return vfree(addr);
 }
+
+/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
+bool bpf_jit_supports_subprog_tailcalls(void)
+{
+        return true;
+}
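The capability advertised by bpf_jit_supports_subprog_tailcalls() above is
exercised by programs that issue a tail call from inside a bpf2bpf-called
subprogram. A minimal sketch in BPF C (the map name, section name and index
are illustrative assumptions):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
            __uint(max_entries, 1);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, sizeof(__u32));
    } jmp_table SEC(".maps");

    static __noinline int subprog(struct __sk_buff *skb)
    {
            /* tail call issued from within a bpf2bpf subprogram; without
             * the new JIT support the verifier rejects this combination */
            bpf_tail_call(skb, &jmp_table, 0);
            return 0;
    }

    SEC("tc")
    int entry(struct __sk_buff *skb)
    {
            return subprog(skb);
    }

    char _license[] SEC("license") = "GPL";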
@@ -1771,6 +1771,10 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
                            struct bpf_tramp_link *l, int stack_size,
                            int run_ctx_off, bool save_ret)
 {
+        void (*exit)(struct bpf_prog *prog, u64 start,
+                     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
+        u64 (*enter)(struct bpf_prog *prog,
+                     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
         u8 *prog = *pprog;
         u8 *jmp_insn;
         int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@@ -1789,15 +1793,21 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
          */
         emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
 
+        if (p->aux->sleepable) {
+                enter = __bpf_prog_enter_sleepable;
+                exit = __bpf_prog_exit_sleepable;
+        } else if (p->expected_attach_type == BPF_LSM_CGROUP) {
+                enter = __bpf_prog_enter_lsm_cgroup;
+                exit = __bpf_prog_exit_lsm_cgroup;
+        }
+
         /* arg1: mov rdi, progs[i] */
         emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
         /* arg2: lea rsi, [rbp - ctx_cookie_off] */
         EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-        if (emit_call(&prog,
-                      p->aux->sleepable ? __bpf_prog_enter_sleepable :
-                      __bpf_prog_enter, prog))
-                return -EINVAL;
+        if (emit_call(&prog, enter, prog))
+                return -EINVAL;
         /* remember prog start time returned by __bpf_prog_enter */
         emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
 
@@ -1841,10 +1851,8 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
         emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
         /* arg3: lea rdx, [rbp - run_ctx_off] */
         EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-        if (emit_call(&prog,
-                      p->aux->sleepable ? __bpf_prog_exit_sleepable :
-                      __bpf_prog_exit, prog))
-                return -EINVAL;
+        if (emit_call(&prog, exit, prog))
+                return -EINVAL;
 
         *pprog = prog;
         return 0;
@@ -2492,3 +2500,9 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len)
                 return ERR_PTR(-EINVAL);
         return dst;
 }
+
+/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
+bool bpf_jit_supports_subprog_tailcalls(void)
+{
+        return true;
+}
@@ -10,6 +10,13 @@
 
 struct bpf_prog_array;
 
+#ifdef CONFIG_BPF_LSM
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
+#else
+#define CGROUP_LSM_NUM 0
+#endif
+
 enum cgroup_bpf_attach_type {
         CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
         CGROUP_INET_INGRESS = 0,
@@ -35,6 +42,8 @@ enum cgroup_bpf_attach_type {
         CGROUP_INET4_GETSOCKNAME,
         CGROUP_INET6_GETSOCKNAME,
         CGROUP_INET_SOCK_RELEASE,
+        CGROUP_LSM_START,
+        CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
         MAX_CGROUP_BPF_ATTACH_TYPE
 };
 
@@ -47,8 +56,8 @@ struct cgroup_bpf {
          * have either zero or one element
          * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
          */
-        struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
-        u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+        struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+        u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
 
         /* list of cgroup shared storages */
         struct list_head storages;
@@ -23,6 +23,13 @@ struct ctl_table;
 struct ctl_table_header;
 struct task_struct;
 
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+                                       const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+                                         const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+                                          const struct bpf_insn *insn);
+
 #ifdef CONFIG_CGROUP_BPF
 
 #define CGROUP_ATYPE(type) \
@@ -95,7 +102,7 @@ struct bpf_cgroup_link {
 };
 
 struct bpf_prog_list {
-        struct list_head node;
+        struct hlist_node node;
         struct bpf_prog *prog;
         struct bpf_cgroup_link *link;
         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
@@ -56,6 +56,8 @@ typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
                                         struct bpf_iter_aux_info *aux);
 typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+typedef unsigned int (*bpf_func_t)(const void *,
+                                   const struct bpf_insn *);
 struct bpf_iter_seq_info {
         const struct seq_operations *seq_ops;
         bpf_iter_init_seq_priv_t init_seq_private;
@@ -792,6 +794,10 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
 u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
                                        struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+                                        struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+                                        struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
 
@@ -879,8 +885,7 @@ struct bpf_dispatcher {
 static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
         const void *ctx,
         const struct bpf_insn *insnsi,
-        unsigned int (*bpf_func)(const void *,
-                                 const struct bpf_insn *))
+        bpf_func_t bpf_func)
 {
         return bpf_func(ctx, insnsi);
 }
@@ -909,8 +914,7 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
         noinline __nocfi unsigned int bpf_dispatcher_##name##_func(     \
                 const void *ctx,                                        \
                 const struct bpf_insn *insnsi,                          \
-                unsigned int (*bpf_func)(const void *,                  \
-                                         const struct bpf_insn *))      \
+                bpf_func_t bpf_func)                                    \
         {                                                               \
                 return bpf_func(ctx, insnsi);                           \
         }                                                               \
@@ -921,8 +925,7 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
         unsigned int bpf_dispatcher_##name##_func(                      \
                 const void *ctx,                                        \
                 const struct bpf_insn *insnsi,                          \
-                unsigned int (*bpf_func)(const void *,                  \
-                                         const struct bpf_insn *));     \
+                bpf_func_t bpf_func);                                   \
         extern struct bpf_dispatcher bpf_dispatcher_##name;
 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
@@ -1061,6 +1064,7 @@ struct bpf_prog_aux {
         struct user_struct *user;
         u64 load_time; /* ns since boottime */
         u32 verified_insns;
+        int cgroup_atype; /* enum cgroup_bpf_attach_type */
         struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
         char name[BPF_OBJ_NAME_LEN];
 #ifdef CONFIG_SECURITY
@@ -1168,6 +1172,11 @@ struct bpf_tramp_link {
         u64 cookie;
 };
 
+struct bpf_shim_tramp_link {
+        struct bpf_tramp_link link;
+        struct bpf_trampoline *trampoline;
+};
+
 struct bpf_tracing_link {
         struct bpf_tramp_link link;
         enum bpf_attach_type attach_type;
@@ -1246,6 +1255,9 @@ struct bpf_dummy_ops {
 int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
                             union bpf_attr __user *uattr);
 #endif
+int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+                                    int cgroup_atype);
+void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
 #else
 static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
 {
@@ -1269,6 +1281,14 @@ static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
 {
         return -EINVAL;
 }
+static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+                                                  int cgroup_atype)
+{
+        return -EOPNOTSUPP;
+}
+static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
+{
+}
 #endif
 
 struct bpf_array {
@@ -1286,6 +1306,9 @@ struct bpf_array {
 #define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
 #define MAX_TAIL_CALL_CNT 33
 
+/* Maximum number of loops for bpf_loop */
+#define BPF_MAX_LOOPS   BIT(23)
+
 #define BPF_F_ACCESS_MASK       (BPF_F_RDONLY |         \
                                  BPF_F_RDONLY_PROG |    \
                                  BPF_F_WRONLY |         \
@@ -2363,9 +2386,13 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
 extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
 extern const struct bpf_func_proto bpf_find_vma_proto;
 extern const struct bpf_func_proto bpf_loop_proto;
 extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
+extern const struct bpf_func_proto bpf_set_retval_proto;
+extern const struct bpf_func_proto bpf_get_retval_proto;
 
 const struct bpf_func_proto *tracing_prog_func_proto(
   enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -2519,4 +2546,12 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
 int bpf_dynptr_check_size(u32 size);
 
+#ifdef CONFIG_BPF_LSM
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
+void bpf_cgroup_atype_put(int cgroup_atype);
+#else
+static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
+static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
+#endif /* CONFIG_BPF_LSM */
+
 #endif /* _LINUX_BPF_H */
@@ -42,6 +42,8 @@ extern const struct bpf_func_proto bpf_inode_storage_get_proto;
 extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
 void bpf_inode_storage_free(struct inode *inode);
 
+void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+
 #else /* !CONFIG_BPF_LSM */
 
 static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
@@ -65,6 +67,11 @@ static inline void bpf_inode_storage_free(struct inode *inode)
 {
 }
 
+static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
+                                            bpf_func_t *bpf_func)
+{
+}
+
 #endif /* CONFIG_BPF_LSM */
 
 #endif /* _LINUX_BPF_LSM_H */
@@ -344,6 +344,14 @@ struct bpf_verifier_state_list {
         int miss_cnt, hit_cnt;
 };
 
+struct bpf_loop_inline_state {
+        int initialized:1; /* set to true upon first entry */
+        int fit_for_inline:1; /* true if callback function is the same
+                               * at each call and flags are always zero
+                               */
+        u32 callback_subprogno; /* valid when fit_for_inline is true */
+};
+
 /* Possible states for alu_state member. */
 #define BPF_ALU_SANITIZE_SRC            (1U << 0)
 #define BPF_ALU_SANITIZE_DST            (1U << 1)
@@ -373,6 +381,10 @@ struct bpf_insn_aux_data {
                         u32 mem_size;   /* mem_size for non-struct typed var */
                 };
         } btf_var;
+        /* if instruction is a call to bpf_loop this field tracks
+         * the state of the relevant registers to make decision about inlining
+         */
+        struct bpf_loop_inline_state loop_inline_state;
         };
         u64 map_key_state; /* constant (32 bit) key tracking for maps */
         int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
@@ -179,7 +179,8 @@ extern struct btf_id_set name;
         BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock)                     \
         BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)                   \
         BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)                   \
-        BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock)
+        BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock)                 \
+        BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
 
 enum {
 #define BTF_SOCK_TYPE(name, str) name,
@@ -914,6 +914,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
+bool bpf_jit_supports_subprog_tailcalls(void);
 bool bpf_jit_supports_kfunc_call(void);
 bool bpf_helper_changes_pkt_data(void *func);
 
@@ -152,6 +152,8 @@ struct module;
 struct sk_buff;
 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
                                unsigned int, size_t);
+typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *);
+
 
 struct proto_ops {
         int             family;
@@ -214,6 +216,8 @@ struct proto_ops {
          */
         int             (*read_sock)(struct sock *sk, read_descriptor_t *desc,
                                      sk_read_actor_t recv_actor);
+        /* This is different from read_sock(), it reads an entire skb at a time. */
+        int             (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
         int             (*sendpage_locked)(struct sock *sk, struct page *page,
                                            int offset, size_t size, int flags);
         int             (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
@@ -672,6 +672,7 @@ void tcp_get_info(struct sock *, struct tcp_info *);
 /* Read 'sendfile()'-style from a TCP socket */
 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                   sk_read_actor_t recv_actor);
+int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 
 void tcp_initialize_rcv_mss(struct sock *sk);
 
@@ -306,8 +306,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
                                struct sk_buff *skb);
 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
                                  __be16 sport, __be16 dport);
-int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
-                  sk_read_actor_t recv_actor);
+int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
 
 /* UDP uses skb->dev_scratch to cache as much information as possible and avoid
  * possibly multiple cache miss on dequeue()
@@ -998,6 +998,7 @@ enum bpf_attach_type {
         BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
         BPF_PERF_EVENT,
         BPF_TRACE_KPROBE_MULTI,
+        BPF_LSM_CGROUP,
         __MAX_BPF_ATTACH_TYPE
 };
 
@@ -1431,6 +1432,7 @@ union bpf_attr {
                 __u32           attach_flags;
                 __aligned_u64   prog_ids;
                 __u32           prog_cnt;
+                __aligned_u64   prog_attach_flags; /* output: per-program attach_flags */
         } query;
 
         struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -6075,6 +6077,8 @@ struct bpf_prog_info {
         __u64 run_cnt;
         __u64 recursion_misses;
         __u32 verified_insns;
+        __u32 attach_btf_obj_id;
+        __u32 attach_btf_id;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
@@ -6782,6 +6786,7 @@ enum bpf_core_relo_kind {
         BPF_CORE_TYPE_SIZE = 9,              /* type size in bytes */
         BPF_CORE_ENUMVAL_EXISTS = 10,        /* enum value existence in target kernel */
         BPF_CORE_ENUMVAL_VALUE = 11,         /* enum value integer value */
+        BPF_CORE_TYPE_MATCHES = 12,          /* type match in target kernel */
 };
 
 /*
@@ -723,9 +723,6 @@ const struct bpf_func_proto bpf_for_each_map_elem_proto = {
         .arg4_type      = ARG_ANYTHING,
 };
 
-/* maximum number of loops */
-#define MAX_LOOPS       BIT(23)
-
 BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
            u64, flags)
 {
@@ -733,9 +730,13 @@ BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
         u64 ret;
         u32 i;
 
+        /* Note: these safety checks are also verified when bpf_loop
+         * is inlined, be careful to modify this code in sync. See
+         * function verifier.c:inline_bpf_loop.
+         */
         if (flags)
                 return -EINVAL;
-        if (nr_loops > MAX_LOOPS)
+        if (nr_loops > BPF_MAX_LOOPS)
                 return -E2BIG;
 
         for (i = 0; i < nr_loops; i++) {
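For reference, the helper is driven from BPF C as below — a minimal sketch of
a bpf_loop() call whose callback and flags are statically known, which is the
shape the new verifier inlining recognizes (section name and program logic are
illustrative):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Called once per iteration; return 0 to continue, 1 to break. */
    static int sum_cb(__u32 index, void *ctx)
    {
            *(long *)ctx += index;
            return 0;
    }

    SEC("tc")
    int compute(struct __sk_buff *skb)
    {
            long acc = 0;

            /* callback_fn and flags are compile-time constants here, so
             * the verifier can inline the loop instead of emitting a
             * helper call per invocation */
            bpf_loop(128, sum_cb, &acc, 0);
            return acc > 0;
    }

    char _license[] SEC("license") = "GPL";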
@@ -16,6 +16,7 @@
 #include <linux/bpf_local_storage.h>
 #include <linux/btf_ids.h>
 #include <linux/ima.h>
+#include <linux/bpf-cgroup.h>
 
 /* For every LSM hook that allows attachment of BPF programs, declare a nop
  * function where a BPF program can be attached.
@@ -35,6 +36,57 @@ BTF_SET_START(bpf_lsm_hooks)
 #undef LSM_HOOK
 BTF_SET_END(bpf_lsm_hooks)
 
+/* List of LSM hooks that should operate on 'current' cgroup regardless
+ * of function signature.
+ */
+BTF_SET_START(bpf_lsm_current_hooks)
+/* operate on freshly allocated sk without any cgroup association */
+BTF_ID(func, bpf_lsm_sk_alloc_security)
+BTF_ID(func, bpf_lsm_sk_free_security)
+BTF_SET_END(bpf_lsm_current_hooks)
+
+/* List of LSM hooks that trigger while the socket is properly locked.
+ */
+BTF_SET_START(bpf_lsm_locked_sockopt_hooks)
+BTF_ID(func, bpf_lsm_socket_sock_rcv_skb)
+BTF_ID(func, bpf_lsm_sock_graft)
+BTF_ID(func, bpf_lsm_inet_csk_clone)
+BTF_ID(func, bpf_lsm_inet_conn_established)
+BTF_SET_END(bpf_lsm_locked_sockopt_hooks)
+
+/* List of LSM hooks that trigger while the socket is _not_ locked,
+ * but it's ok to call bpf_{g,s}etsockopt because the socket is still
+ * in the early init phase.
+ */
+BTF_SET_START(bpf_lsm_unlocked_sockopt_hooks)
+BTF_ID(func, bpf_lsm_socket_post_create)
+BTF_ID(func, bpf_lsm_socket_socketpair)
+BTF_SET_END(bpf_lsm_unlocked_sockopt_hooks)
+
+void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
+                              bpf_func_t *bpf_func)
+{
+        const struct btf_param *args;
+
+        if (btf_type_vlen(prog->aux->attach_func_proto) < 1 ||
+            btf_id_set_contains(&bpf_lsm_current_hooks,
+                                prog->aux->attach_btf_id)) {
+                *bpf_func = __cgroup_bpf_run_lsm_current;
+                return;
+        }
+
+        args = btf_params(prog->aux->attach_func_proto);
+
+#ifdef CONFIG_NET
+        if (args[0].type == btf_sock_ids[BTF_SOCK_TYPE_SOCKET])
+                *bpf_func = __cgroup_bpf_run_lsm_socket;
+        else if (args[0].type == btf_sock_ids[BTF_SOCK_TYPE_SOCK])
+                *bpf_func = __cgroup_bpf_run_lsm_sock;
+        else
+#endif
+                *bpf_func = __cgroup_bpf_run_lsm_current;
+}
+
 int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
                         const struct bpf_prog *prog)
 {
@@ -158,6 +210,35 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                 return prog->aux->sleepable ? &bpf_ima_file_hash_proto : NULL;
         case BPF_FUNC_get_attach_cookie:
                 return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto : NULL;
+        case BPF_FUNC_get_local_storage:
+                return prog->expected_attach_type == BPF_LSM_CGROUP ?
+                        &bpf_get_local_storage_proto : NULL;
+        case BPF_FUNC_set_retval:
+                return prog->expected_attach_type == BPF_LSM_CGROUP ?
+                        &bpf_set_retval_proto : NULL;
+        case BPF_FUNC_get_retval:
+                return prog->expected_attach_type == BPF_LSM_CGROUP ?
+                        &bpf_get_retval_proto : NULL;
+        case BPF_FUNC_setsockopt:
+                if (prog->expected_attach_type != BPF_LSM_CGROUP)
+                        return NULL;
+                if (btf_id_set_contains(&bpf_lsm_locked_sockopt_hooks,
+                                        prog->aux->attach_btf_id))
+                        return &bpf_sk_setsockopt_proto;
+                if (btf_id_set_contains(&bpf_lsm_unlocked_sockopt_hooks,
+                                        prog->aux->attach_btf_id))
+                        return &bpf_unlocked_sk_setsockopt_proto;
+                return NULL;
+        case BPF_FUNC_getsockopt:
+                if (prog->expected_attach_type != BPF_LSM_CGROUP)
+                        return NULL;
+                if (btf_id_set_contains(&bpf_lsm_locked_sockopt_hooks,
+                                        prog->aux->attach_btf_id))
+                        return &bpf_sk_getsockopt_proto;
+                if (btf_id_set_contains(&bpf_lsm_unlocked_sockopt_hooks,
+                                        prog->aux->attach_btf_id))
+                        return &bpf_unlocked_sk_getsockopt_proto;
+                return NULL;
         default:
                 return tracing_prog_func_proto(func_id, prog);
         }
@@ -503,10 +503,9 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
                 goto unlock;
         }
 
-        /* Error during st_ops->reg(). It is very unlikely since
-         * the above init_member() should have caught it earlier
-         * before reg(). The only possibility is if there was a race
-         * in registering the struct_ops (under the same name) to
+        /* Error during st_ops->reg(). Can happen if this struct_ops needs to be
+         * verified as a whole, after all init_member() calls. Can also happen if
+         * there was a race in registering the struct_ops (under the same name) to
          * a sub-system through different struct_ops's maps.
          */
         set_memory_nx((long)st_map->image, 1);
@@ -5368,6 +5368,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 
         if (arg == nr_args) {
                 switch (prog->expected_attach_type) {
+                case BPF_LSM_CGROUP:
                 case BPF_LSM_MAC:
                 case BPF_TRACE_FEXIT:
                         /* When LSM programs are attached to void LSM hooks
@@ -7421,87 +7422,6 @@ EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
 
 #define MAX_TYPES_ARE_COMPAT_DEPTH 2
 
-static
-int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
-                                const struct btf *targ_btf, __u32 targ_id,
-                                int level)
-{
-        const struct btf_type *local_type, *targ_type;
-        int depth = 32; /* max recursion depth */
-
-        /* caller made sure that names match (ignoring flavor suffix) */
-        local_type = btf_type_by_id(local_btf, local_id);
-        targ_type = btf_type_by_id(targ_btf, targ_id);
-        if (btf_kind(local_type) != btf_kind(targ_type))
-                return 0;
-
-recur:
-        depth--;
-        if (depth < 0)
-                return -EINVAL;
-
-        local_type = btf_type_skip_modifiers(local_btf, local_id, &local_id);
-        targ_type = btf_type_skip_modifiers(targ_btf, targ_id, &targ_id);
-        if (!local_type || !targ_type)
-                return -EINVAL;
-
-        if (btf_kind(local_type) != btf_kind(targ_type))
-                return 0;
-
-        switch (btf_kind(local_type)) {
-        case BTF_KIND_UNKN:
-        case BTF_KIND_STRUCT:
-        case BTF_KIND_UNION:
-        case BTF_KIND_ENUM:
-        case BTF_KIND_FWD:
-        case BTF_KIND_ENUM64:
-                return 1;
-        case BTF_KIND_INT:
-                /* just reject deprecated bitfield-like integers; all other
-                 * integers are by default compatible between each other
-                 */
-                return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
-        case BTF_KIND_PTR:
-                local_id = local_type->type;
-                targ_id = targ_type->type;
-                goto recur;
-        case BTF_KIND_ARRAY:
-                local_id = btf_array(local_type)->type;
-                targ_id = btf_array(targ_type)->type;
-                goto recur;
-        case BTF_KIND_FUNC_PROTO: {
-                struct btf_param *local_p = btf_params(local_type);
-                struct btf_param *targ_p = btf_params(targ_type);
-                __u16 local_vlen = btf_vlen(local_type);
-                __u16 targ_vlen = btf_vlen(targ_type);
-                int i, err;
-
-                if (local_vlen != targ_vlen)
-                        return 0;
-
-                for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
-                        if (level <= 0)
-                                return -EINVAL;
-
-                        btf_type_skip_modifiers(local_btf, local_p->type, &local_id);
-                        btf_type_skip_modifiers(targ_btf, targ_p->type, &targ_id);
-                        err = __bpf_core_types_are_compat(local_btf, local_id,
-                                                          targ_btf, targ_id,
-                                                          level - 1);
-                        if (err <= 0)
-                                return err;
-                }
-
-                /* tail recurse for return type check */
-                btf_type_skip_modifiers(local_btf, local_type->type, &local_id);
-                btf_type_skip_modifiers(targ_btf, targ_type->type, &targ_id);
-                goto recur;
-        }
-        default:
-                return 0;
-        }
-}
-
 /* Check local and target types for compatibility. This check is used for
  * type-based CO-RE relocations and follow slightly different rules than
  * field-based relocations. This function assumes that root types were already
@@ -7524,11 +7444,19 @@ recur:
 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
                               const struct btf *targ_btf, __u32 targ_id)
 {
-        return __bpf_core_types_are_compat(local_btf, local_id,
-                                           targ_btf, targ_id,
+        return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
                                            MAX_TYPES_ARE_COMPAT_DEPTH);
 }
 
+#define MAX_TYPES_MATCH_DEPTH 2
+
+int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
+                         const struct btf *targ_btf, u32 targ_id)
+{
+        return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
+                                      MAX_TYPES_MATCH_DEPTH);
+}
+
 static bool bpf_core_is_flavor_sep(const char *s)
 {
         /* check X___Y name pattern, where X and Y are not underscores */
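The new BPF_CORE_TYPE_MATCHES relocation kind handled above is exposed to
programs through a libbpf macro added in the same series. A minimal sketch of
probing for a type match against a local "flavor" definition (hook and member
choice are illustrative):

    // SPDX-License-Identifier: GPL-2.0
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    /* The ___local flavor suffix is ignored when matching against the
     * kernel's BTF. */
    struct task_struct___local {
            int pid;
    } __attribute__((preserve_access_index));

    SEC("tp/syscalls/sys_enter_getpid")
    int probe(void *ctx)
    {
            /* true only if the kernel's task_struct has a matching
             * 'int pid' member per the matching rules implemented above */
            if (bpf_core_type_matches(struct task_struct___local))
                    bpf_printk("task_struct matches local definition");
            return 0;
    }

    char _license[] SEC("license") = "GPL";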
@@ -14,6 +14,8 @@
 #include <linux/string.h>
 #include <linux/bpf.h>
 #include <linux/bpf-cgroup.h>
+#include <linux/bpf_lsm.h>
+#include <linux/bpf_verifier.h>
 #include <net/sock.h>
 #include <net/bpf_sk_storage.h>
 
@@ -61,6 +63,132 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
         return run_ctx.retval;
 }
 
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+                                       const struct bpf_insn *insn)
+{
+        const struct bpf_prog *shim_prog;
+        struct sock *sk;
+        struct cgroup *cgrp;
+        int ret = 0;
+        u64 *args;
+
+        args = (u64 *)ctx;
+        sk = (void *)(unsigned long)args[0];
+        /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
+        shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
+
+        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+        if (likely(cgrp))
+                ret = bpf_prog_run_array_cg(&cgrp->bpf,
+                                            shim_prog->aux->cgroup_atype,
+                                            ctx, bpf_prog_run, 0, NULL);
+        return ret;
+}
+
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+                                         const struct bpf_insn *insn)
+{
+        const struct bpf_prog *shim_prog;
+        struct socket *sock;
+        struct cgroup *cgrp;
+        int ret = 0;
+        u64 *args;
+
+        args = (u64 *)ctx;
+        sock = (void *)(unsigned long)args[0];
+        /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
+        shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
+
+        cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
+        if (likely(cgrp))
+                ret = bpf_prog_run_array_cg(&cgrp->bpf,
+                                            shim_prog->aux->cgroup_atype,
+                                            ctx, bpf_prog_run, 0, NULL);
+        return ret;
+}
+
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+                                          const struct bpf_insn *insn)
+{
+        const struct bpf_prog *shim_prog;
+        struct cgroup *cgrp;
+        int ret = 0;
+
+        /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
+        shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
+
+        /* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
+        cgrp = task_dfl_cgroup(current);
+        if (likely(cgrp))
+                ret = bpf_prog_run_array_cg(&cgrp->bpf,
+                                            shim_prog->aux->cgroup_atype,
+                                            ctx, bpf_prog_run, 0, NULL);
+        return ret;
+}
+
+#ifdef CONFIG_BPF_LSM
+struct cgroup_lsm_atype {
+        u32 attach_btf_id;
+        int refcnt;
+};
+
+static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];
+
+static enum cgroup_bpf_attach_type
+bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
+{
+        int i;
+
+        lockdep_assert_held(&cgroup_mutex);
+
+        if (attach_type != BPF_LSM_CGROUP)
+                return to_cgroup_bpf_attach_type(attach_type);
+
+        for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
+                if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
+                        return CGROUP_LSM_START + i;
+
+        for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
+                if (cgroup_lsm_atype[i].attach_btf_id == 0)
+                        return CGROUP_LSM_START + i;
+
+        return -E2BIG;
+
+}
+
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
+{
+        int i = cgroup_atype - CGROUP_LSM_START;
+
+        lockdep_assert_held(&cgroup_mutex);
+
+        WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
+                     cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);
+
+        cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
+        cgroup_lsm_atype[i].refcnt++;
+}
+
+void bpf_cgroup_atype_put(int cgroup_atype)
+{
+        int i = cgroup_atype - CGROUP_LSM_START;
+
+        mutex_lock(&cgroup_mutex);
+        if (--cgroup_lsm_atype[i].refcnt <= 0)
+                cgroup_lsm_atype[i].attach_btf_id = 0;
+        WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
+        mutex_unlock(&cgroup_mutex);
+}
+#else
+static enum cgroup_bpf_attach_type
+bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
+{
+        if (attach_type != BPF_LSM_CGROUP)
+                return to_cgroup_bpf_attach_type(attach_type);
+        return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_LSM */
+
 void cgroup_bpf_offline(struct cgroup *cgrp)
 {
         cgroup_get(cgrp);
@@ -157,15 +285,22 @@ static void cgroup_bpf_release(struct work_struct *work)
         mutex_lock(&cgroup_mutex);
 
         for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
-                struct list_head *progs = &cgrp->bpf.progs[atype];
-                struct bpf_prog_list *pl, *pltmp;
+                struct hlist_head *progs = &cgrp->bpf.progs[atype];
+                struct bpf_prog_list *pl;
+                struct hlist_node *pltmp;
 
-                list_for_each_entry_safe(pl, pltmp, progs, node) {
-                        list_del(&pl->node);
-                        if (pl->prog)
+                hlist_for_each_entry_safe(pl, pltmp, progs, node) {
+                        hlist_del(&pl->node);
+                        if (pl->prog) {
+                                if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
+                                        bpf_trampoline_unlink_cgroup_shim(pl->prog);
                                 bpf_prog_put(pl->prog);
-                        if (pl->link)
+                        }
+                        if (pl->link) {
+                                if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
+                                        bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
                                 bpf_cgroup_link_auto_detach(pl->link);
+                        }
                         kfree(pl);
                         static_branch_dec(&cgroup_bpf_enabled_key[atype]);
                 }
@@ -217,12 +352,12 @@ static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
 /* count number of elements in the list.
  * it's slow but the list cannot be long
  */
-static u32 prog_list_length(struct list_head *head)
+static u32 prog_list_length(struct hlist_head *head)
 {
         struct bpf_prog_list *pl;
         u32 cnt = 0;
 
-        list_for_each_entry(pl, head, node) {
+        hlist_for_each_entry(pl, head, node) {
                 if (!prog_list_prog(pl))
                         continue;
                 cnt++;
@@ -291,7 +426,7 @@ static int compute_effective_progs(struct cgroup *cgrp,
                 if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
                         continue;
 
-                list_for_each_entry(pl, &p->bpf.progs[atype], node) {
+                hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
                         if (!prog_list_prog(pl))
                                 continue;
 
@@ -342,7 +477,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
                 cgroup_bpf_get(p);
 
         for (i = 0; i < NR; i++)
-                INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
+                INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);
 
         INIT_LIST_HEAD(&cgrp->bpf.storages);
 
@@ -418,7 +553,7 @@ cleanup:
 
 #define BPF_CGROUP_MAX_PROGS 64
 
-static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
+static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
                                                struct bpf_prog *prog,
                                                struct bpf_cgroup_link *link,
                                                struct bpf_prog *replace_prog,
@@ -428,12 +563,12 @@ static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
 
         /* single-attach case */
         if (!allow_multi) {
-                if (list_empty(progs))
+                if (hlist_empty(progs))
                         return NULL;
-                return list_first_entry(progs, typeof(*pl), node);
+                return hlist_entry(progs->first, typeof(*pl), node);
         }
 
-        list_for_each_entry(pl, progs, node) {
+        hlist_for_each_entry(pl, progs, node) {
                 if (prog && pl->prog == prog && prog != replace_prog)
                         /* disallow attaching the same prog twice */
                         return ERR_PTR(-EINVAL);
@@ -444,7 +579,7 @@ static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
 
         /* direct prog multi-attach w/ replacement case */
         if (replace_prog) {
-                list_for_each_entry(pl, progs, node) {
+                hlist_for_each_entry(pl, progs, node) {
                         if (pl->prog == replace_prog)
                                 /* a match found */
                                 return pl;
@@ -478,9 +613,10 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
         struct bpf_prog *old_prog = NULL;
         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
         struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
+        struct bpf_prog *new_prog = prog ? : link->link.prog;
         enum cgroup_bpf_attach_type atype;
         struct bpf_prog_list *pl;
-        struct list_head *progs;
+        struct hlist_head *progs;
         int err;
 
         if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
@@ -494,7 +630,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
                 /* replace_prog implies BPF_F_REPLACE, and vice versa */
                 return -EINVAL;
 
-        atype = to_cgroup_bpf_attach_type(type);
+        atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
         if (atype < 0)
                 return -EINVAL;
 
@@ -503,7 +639,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
         if (!hierarchy_allows_attach(cgrp, atype))
                 return -EPERM;
 
-        if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
+        if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
                 /* Disallow attaching non-overridable on top
                  * of existing overridable in this cgroup.
                  * Disallow attaching multi-prog if overridable or none
@@ -525,12 +661,22 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
         if (pl) {
                 old_prog = pl->prog;
         } else {
+                struct hlist_node *last = NULL;
+
                 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
                 if (!pl) {
                         bpf_cgroup_storages_free(new_storage);
                         return -ENOMEM;
                 }
-                list_add_tail(&pl->node, progs);
+                if (hlist_empty(progs))
+                        hlist_add_head(&pl->node, progs);
+                else
+                        hlist_for_each(last, progs) {
+                                if (last->next)
+                                        continue;
+                                hlist_add_behind(&pl->node, last);
+                                break;
+                        }
         }
 
         pl->prog = prog;
@@ -538,17 +684,30 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
         bpf_cgroup_storages_assign(pl->storage, storage);
         cgrp->bpf.flags[atype] = saved_flags;
 
+        if (type == BPF_LSM_CGROUP) {
+                err = bpf_trampoline_link_cgroup_shim(new_prog, atype);
+                if (err)
+                        goto cleanup;
+        }
+
         err = update_effective_progs(cgrp, atype);
         if (err)
-                goto cleanup;
+                goto cleanup_trampoline;
 
-        if (old_prog)
+        if (old_prog) {
+                if (type == BPF_LSM_CGROUP)
+                        bpf_trampoline_unlink_cgroup_shim(old_prog);
                 bpf_prog_put(old_prog);
-        else
+        } else {
                 static_branch_inc(&cgroup_bpf_enabled_key[atype]);
+        }
         bpf_cgroup_storages_link(new_storage, cgrp, type);
         return 0;
 
+cleanup_trampoline:
+        if (type == BPF_LSM_CGROUP)
+                bpf_trampoline_unlink_cgroup_shim(new_prog);
+
 cleanup:
         if (old_prog) {
                 pl->prog = old_prog;
@@ -556,7 +715,7 @@ cleanup:
         }
         bpf_cgroup_storages_free(new_storage);
         if (!old_prog) {
-                list_del(&pl->node);
+                hlist_del(&pl->node);
                 kfree(pl);
         }
         return err;
@@ -587,7 +746,7 @@ static void replace_effective_prog(struct cgroup *cgrp,
         struct cgroup_subsys_state *css;
         struct bpf_prog_array *progs;
         struct bpf_prog_list *pl;
-        struct list_head *head;
+        struct hlist_head *head;
         struct cgroup *cg;
         int pos;
 
@@ -603,7 +762,7 @@ static void replace_effective_prog(struct cgroup *cgrp,
                         continue;
 
                 head = &cg->bpf.progs[atype];
-                list_for_each_entry(pl, head, node) {
+                hlist_for_each_entry(pl, head, node) {
                         if (!prog_list_prog(pl))
                                 continue;
                         if (pl->link == link)
@@ -637,10 +796,10 @@ static int __cgroup_bpf_replace(struct cgroup *cgrp,
         enum cgroup_bpf_attach_type atype;
         struct bpf_prog *old_prog;
         struct bpf_prog_list *pl;
-        struct list_head *progs;
+        struct hlist_head *progs;
         bool found = false;
 
-        atype = to_cgroup_bpf_attach_type(link->type);
+        atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id);
         if (atype < 0)
                 return -EINVAL;
 
@@ -649,7 +808,7 @@ static int __cgroup_bpf_replace(struct cgroup *cgrp,
         if (link->link.prog->type != new_prog->type)
                 return -EINVAL;
 
-        list_for_each_entry(pl, progs, node) {
+        hlist_for_each_entry(pl, progs, node) {
                 if (pl->link == link) {
                         found = true;
                         break;
@@ -688,7 +847,7 @@ out_unlock:
         return ret;
 }
 
-static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
+static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
                                                struct bpf_prog *prog,
                                                struct bpf_cgroup_link *link,
                                                bool allow_multi)
@@ -696,14 +855,14 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
         struct bpf_prog_list *pl;
 
         if (!allow_multi) {
-                if (list_empty(progs))
+                if (hlist_empty(progs))
                         /* report error when trying to detach and nothing is attached */
                         return ERR_PTR(-ENOENT);
 
                 /* to maintain backward compatibility NONE and OVERRIDE cgroups
                  * allow detaching with invalid FD (prog==NULL) in legacy mode
                  */
-                return list_first_entry(progs, typeof(*pl), node);
+                return hlist_entry(progs->first, typeof(*pl), node);
         }
 
         if (!prog && !link)
@@ -713,7 +872,7 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
                 return ERR_PTR(-EINVAL);
 
         /* find the prog or link and detach it */
-        list_for_each_entry(pl, progs, node) {
+        hlist_for_each_entry(pl, progs, node) {
                 if (pl->prog == prog && pl->link == link)
                         return pl;
         }
@@ -737,7 +896,7 @@ static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
         struct cgroup_subsys_state *css;
         struct bpf_prog_array *progs;
         struct bpf_prog_list *pl;
-        struct list_head *head;
+        struct hlist_head *head;
         struct cgroup *cg;
         int pos;
 
@@ -754,7 +913,7 @@ static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
                                 continue;
 
                         head = &cg->bpf.progs[atype];
-                        list_for_each_entry(pl, head, node) {
+                        hlist_for_each_entry(pl, head, node) {
                                 if (!prog_list_prog(pl))
                                         continue;
                                 if (pl->prog == prog && pl->link == link)
@@ -791,10 +950,16 @@ static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
         enum cgroup_bpf_attach_type atype;
         struct bpf_prog *old_prog;
         struct bpf_prog_list *pl;
-        struct list_head *progs;
+        struct hlist_head *progs;
+        u32 attach_btf_id = 0;
         u32 flags;
 
-        atype = to_cgroup_bpf_attach_type(type);
+        if (prog)
+                attach_btf_id = prog->aux->attach_btf_id;
+        if (link)
+                attach_btf_id = link->link.prog->aux->attach_btf_id;
+
+        atype = bpf_cgroup_atype_find(type, attach_btf_id);
         if (atype < 0)
                 return -EINVAL;
 
@@ -822,13 +987,17 @@ static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
         }
 
         /* now can actually delete it from this cgroup list */
-        list_del(&pl->node);
+        hlist_del(&pl->node);
+
         kfree(pl);
-        if (list_empty(progs))
+        if (hlist_empty(progs))
                 /* last program was detached, reset flags to zero */
                 cgrp->bpf.flags[atype] = 0;
-        if (old_prog)
+        if (old_prog) {
+                if (type == BPF_LSM_CGROUP)
+                        bpf_trampoline_unlink_cgroup_shim(old_prog);
                 bpf_prog_put(old_prog);
+        }
         static_branch_dec(&cgroup_bpf_enabled_key[atype]);
         return 0;
 }
@@ -848,57 +1017,90 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                               union bpf_attr __user *uattr)
 {
+        __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
         __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
         enum bpf_attach_type type = attr->query.attach_type;
+        enum cgroup_bpf_attach_type from_atype, to_atype;
         enum cgroup_bpf_attach_type atype;
         struct bpf_prog_array *effective;
-        struct list_head *progs;
-        struct bpf_prog *prog;
         int cnt, ret = 0, i;
+        int total_cnt = 0;
         u32 flags;
 
-        atype = to_cgroup_bpf_attach_type(type);
-        if (atype < 0)
-                return -EINVAL;
-
-        progs = &cgrp->bpf.progs[atype];
-        flags = cgrp->bpf.flags[atype];
+        if (type == BPF_LSM_CGROUP) {
+                if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
+                        return -EINVAL;
+
+                from_atype = CGROUP_LSM_START;
+                to_atype = CGROUP_LSM_END;
+                flags = 0;
+        } else {
+                from_atype = to_cgroup_bpf_attach_type(type);
+                if (from_atype < 0)
+                        return -EINVAL;
+                to_atype = from_atype;
+                flags = cgrp->bpf.flags[from_atype];
+        }
 
-        effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
-                                              lockdep_is_held(&cgroup_mutex));
-
-        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
-                cnt = bpf_prog_array_length(effective);
-        else
-                cnt = prog_list_length(progs);
+        for (atype = from_atype; atype <= to_atype; atype++) {
+                if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
+                        effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
+                                                              lockdep_is_held(&cgroup_mutex));
+                        total_cnt += bpf_prog_array_length(effective);
+                } else {
+                        total_cnt += prog_list_length(&cgrp->bpf.progs[atype]);
+                }
+        }
 
         if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
                 return -EFAULT;
-        if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
+        if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
                 return -EFAULT;
-        if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
+        if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
                 /* return early if user requested only program count + flags */
                 return 0;
-        if (attr->query.prog_cnt < cnt) {
-                cnt = attr->query.prog_cnt;
+
+        if (attr->query.prog_cnt < total_cnt) {
+                total_cnt = attr->query.prog_cnt;
                 ret = -ENOSPC;
         }
 
-        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
-                return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
-        } else {
-                struct bpf_prog_list *pl;
-                u32 id;
+        for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
+                if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
+                        effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
+                                                              lockdep_is_held(&cgroup_mutex));
+                        cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
+                        ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
+                } else {
+                        struct hlist_head *progs;
+                        struct bpf_prog_list *pl;
+                        struct bpf_prog *prog;
+                        u32 id;
 
-                i = 0;
-                list_for_each_entry(pl, progs, node) {
-                        prog = prog_list_prog(pl);
-                        id = prog->aux->id;
-                        if (copy_to_user(prog_ids + i, &id, sizeof(id)))
-                                return -EFAULT;
-                        if (++i == cnt)
-                                break;
+                        progs = &cgrp->bpf.progs[atype];
+                        cnt = min_t(int, prog_list_length(progs), total_cnt);
+                        i = 0;
+                        hlist_for_each_entry(pl, progs, node) {
+                                prog = prog_list_prog(pl);
+                                id = prog->aux->id;
+                                if (copy_to_user(prog_ids + i, &id, sizeof(id)))
+                                        return -EFAULT;
+                                if (++i == cnt)
+                                        break;
+                        }
+                }
+
+                if (prog_attach_flags) {
+                        flags = cgrp->bpf.flags[atype];
+
+                        for (i = 0; i < cnt; i++)
+                                if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
+                                        return -EFAULT;
+                        prog_attach_flags += cnt;
+                }
+
+                prog_ids += cnt;
+                total_cnt -= cnt;
         }
         return ret;
 }
@@ -987,6 +1189,8 @@ static void bpf_cgroup_link_release(struct bpf_link *link)
 
         WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
                                     cg_link->type));
+        if (cg_link->type == BPF_LSM_CGROUP)
+                bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);
 
         cg = cg_link->cgroup;
         cg_link->cgroup = NULL;
@@ -1331,7 +1535,7 @@ BPF_CALL_0(bpf_get_retval)
         return ctx->retval;
 }
 
-static const struct bpf_func_proto bpf_get_retval_proto = {
+const struct bpf_func_proto bpf_get_retval_proto = {
         .func           = bpf_get_retval,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
@@ -1346,7 +1550,7 @@ BPF_CALL_1(bpf_set_retval, int, retval)
         return 0;
 }
 
-static const struct bpf_func_proto bpf_set_retval_proto = {
+const struct bpf_func_proto bpf_set_retval_proto = {
         .func           = bpf_set_retval,
         .gpl_only       = false,
         .ret_type       = RET_INTEGER,
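User space reaches the multi-attach query path above through the regular
BPF_PROG_QUERY command. A minimal raw-syscall sketch (cgroup path and buffer
sizes are illustrative; error handling trimmed) showing the new per-program
attach flags buffer:

    // SPDX-License-Identifier: GPL-2.0
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    int query_lsm_cgroup(const char *cgroup_path)
    {
            __u32 prog_ids[64], prog_flags[64];
            union bpf_attr attr;
            int cg_fd = open(cgroup_path, O_RDONLY);

            if (cg_fd < 0)
                    return -1;

            memset(&attr, 0, sizeof(attr));
            attr.query.target_fd = cg_fd;
            attr.query.attach_type = BPF_LSM_CGROUP;
            attr.query.prog_cnt = 64;
            attr.query.prog_ids = (__u64)(unsigned long)prog_ids;
            /* new in this series: per-program flags; for BPF_LSM_CGROUP,
             * passing prog_ids without this buffer is rejected (-EINVAL) */
            attr.query.prog_attach_flags = (__u64)(unsigned long)prog_flags;

            return syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
    }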
@@ -107,6 +107,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
         fp->aux->prog = fp;
         fp->jit_requested = ebpf_jit_enabled();
         fp->blinding_requested = bpf_jit_blinding_enabled(fp);
+#ifdef CONFIG_CGROUP_BPF
+        aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
+#endif
 
         INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
         mutex_init(&fp->aux->used_maps_mutex);
@@ -2569,6 +2572,10 @@ static void bpf_prog_free_deferred(struct work_struct *work)
         aux = container_of(work, struct bpf_prog_aux, work);
 #ifdef CONFIG_BPF_SYSCALL
         bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
 #endif
+#ifdef CONFIG_CGROUP_BPF
+        if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
+                bpf_cgroup_atype_put(aux->cgroup_atype);
+#endif
         bpf_free_used_maps(aux);
         bpf_free_used_btfs(aux);
@@ -2666,6 +2673,8 @@ const struct bpf_func_proto bpf_get_local_storage_proto __weak;
 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
+const struct bpf_func_proto bpf_set_retval_proto __weak;
+const struct bpf_func_proto bpf_get_retval_proto __weak;
 
 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
 {
@@ -2729,6 +2738,12 @@ bool __weak bpf_jit_needs_zext(void)
         return false;
 }
 
+/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
+bool __weak bpf_jit_supports_subprog_tailcalls(void)
+{
+        return false;
+}
+
 bool __weak bpf_jit_supports_kfunc_call(void)
 {
         return false;
@@ -3416,6 +3416,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
		return BPF_PROG_TYPE_SK_LOOKUP;
	case BPF_XDP:
		return BPF_PROG_TYPE_XDP;
	case BPF_LSM_CGROUP:
		return BPF_PROG_TYPE_LSM;
	default:
		return BPF_PROG_TYPE_UNSPEC;
	}

@@ -3469,6 +3471,11 @@ static int bpf_prog_attach(const union bpf_attr *attr)
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_LSM:
		if (ptype == BPF_PROG_TYPE_LSM &&
		    prog->expected_attach_type != BPF_LSM_CGROUP)
			return -EINVAL;

		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
		break;
	default:

@@ -3506,13 +3513,14 @@ static int bpf_prog_detach(const union bpf_attr *attr)
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_LSM:
		return cgroup_bpf_prog_detach(attr, ptype);
	default:
		return -EINVAL;
	}
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
#define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)

@@ -3548,6 +3556,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
	case BPF_CGROUP_SYSCTL:
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
	case BPF_LSM_CGROUP:
		return cgroup_bpf_prog_query(attr, uattr);
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);

@@ -4058,6 +4067,11 @@ static int bpf_prog_get_info_by_fd(struct file *file,

	if (prog->aux->btf)
		info.btf_id = btf_obj_id(prog->aux->btf);
	info.attach_btf_id = prog->aux->attach_btf_id;
	if (prog->aux->attach_btf)
		info.attach_btf_obj_id = btf_obj_id(prog->aux->attach_btf);
	else if (prog->aux->dst_prog)
		info.attach_btf_obj_id = btf_obj_id(prog->aux->dst_prog->aux->attach_btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;

@@ -4540,6 +4554,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
		ret = bpf_raw_tp_link_attach(prog, NULL);
	else if (prog->expected_attach_type == BPF_TRACE_ITER)
		ret = bpf_iter_link_attach(attr, uattr, prog);
	else if (prog->expected_attach_type == BPF_LSM_CGROUP)
		ret = cgroup_bpf_link_attach(attr, prog);
	else
		ret = bpf_tracing_prog_attach(prog,
					      attr->link_create.target_fd,

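For context, a minimal sketch of the kind of program this new BPF_LSM_CGROUP plumbing accepts, loosely modeled on the selftests elsewhere in this series; the hook name and section name are assumptions taken from the lsm_cgroup flavor, not from these hunks:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("lsm_cgroup/socket_bind")
	int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
		     int addrlen)
	{
		return 1;	/* allow; returning 0 denies the operation */
	}

	char _license[] SEC("license") = "GPL";

User space would attach such a program to a cgroup fd with bpf_link_create(prog_fd, cgroup_fd, BPF_LSM_CGROUP, NULL), which lands in the cgroup_bpf_link_attach() branch added to link_create() above.
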
@@ -11,6 +11,8 @@
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_lsm.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {

@@ -410,7 +412,7 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
	}
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_tramp_link *link_exiting;

@@ -418,44 +420,33 @@ int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline
	int cnt = 0, i;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
	if (tr->extension_prog)
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
		return -EBUSY;

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		if (cnt)
			return -EBUSY;
		tr->extension_prog = link->link.prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 link->link.prog->bpf_func);
		goto out;
		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					  link->link.prog->bpf_func);
	}
	if (cnt >= BPF_MAX_TRAMP_LINKS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&link->tramp_hlist)) {
	if (cnt >= BPF_MAX_TRAMP_LINKS)
		return -E2BIG;
	if (!hlist_unhashed(&link->tramp_hlist))
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
		return -EBUSY;
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		/* prog already linked */
		err = -EBUSY;
		goto out;
		return -EBUSY;
	}

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);

@@ -465,34 +456,220 @@ int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	return err;
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_link_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
		return err;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	return bpf_trampoline_update(tr);
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_unlink_prog(link, tr);
	mutex_unlock(&tr->mutex);
	return err;
}

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
static void bpf_shim_tramp_link_release(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	/* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
	if (!shim_link->trampoline)
		return;

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
	bpf_trampoline_put(shim_link->trampoline);
}

static void bpf_shim_tramp_link_dealloc(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	kfree(shim_link);
}

static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
	.release = bpf_shim_tramp_link_release,
	.dealloc = bpf_shim_tramp_link_dealloc,
};

static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
						     bpf_func_t bpf_func,
						     int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_prog *p;

	shim_link = kzalloc(sizeof(*shim_link), GFP_USER);
	if (!shim_link)
		return NULL;

	p = bpf_prog_alloc(1, 0);
	if (!p) {
		kfree(shim_link);
		return NULL;
	}

	p->jited = false;
	p->bpf_func = bpf_func;

	p->aux->cgroup_atype = cgroup_atype;
	p->aux->attach_func_proto = prog->aux->attach_func_proto;
	p->aux->attach_btf_id = prog->aux->attach_btf_id;
	p->aux->attach_btf = prog->aux->attach_btf;
	btf_get(p->aux->attach_btf);
	p->type = BPF_PROG_TYPE_LSM;
	p->expected_attach_type = BPF_LSM_MAC;
	bpf_prog_inc(p);
	bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
		      &bpf_shim_tramp_link_lops, p);
	bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);

	return shim_link;
}

static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
						    bpf_func_t bpf_func)
{
	struct bpf_tramp_link *link;
	int kind;

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			struct bpf_prog *p = link->link.prog;

			if (p->bpf_func == bpf_func)
				return container_of(link, struct bpf_shim_tramp_link, link);
		}
	}

	return NULL;
}

int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_attach_target_info tgt_info = {};
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;
	int err;

	err = bpf_check_attach_target(NULL, prog, NULL,
				      prog->aux->attach_btf_id,
				      &tgt_info);
	if (err)
		return err;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	mutex_lock(&tr->mutex);

	shim_link = cgroup_shim_find(tr, bpf_func);
	if (shim_link) {
		/* Reusing existing shim attached by the other program. */
		bpf_link_inc(&shim_link->link.link);

		mutex_unlock(&tr->mutex);
		bpf_trampoline_put(tr); /* bpf_trampoline_get above */
		return 0;
	}

	/* Allocate and install new shim. */

	shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype);
	if (!shim_link) {
		err = -ENOMEM;
		goto err;
	}

	err = __bpf_trampoline_link_prog(&shim_link->link, tr);
	if (err)
		goto err;

	shim_link->trampoline = tr;
	/* note, we're still holding tr refcnt from above */

	mutex_unlock(&tr->mutex);

	return 0;
err:
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	/* have to release tr while _not_ holding its mutex */
	bpf_trampoline_put(tr); /* bpf_trampoline_get above */

	return err;
}

void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_lookup(key);
	if (WARN_ON_ONCE(!tr))
		return;

	mutex_lock(&tr->mutex);
	shim_link = cgroup_shim_find(tr, bpf_func);
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	bpf_trampoline_put(tr); /* bpf_trampoline_lookup above */
}
#endif

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{

@@ -625,6 +802,31 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	/* Runtime stats are exported via actual BPF_LSM_CGROUP
	 * programs, not the shims.
	 */
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return NO_START_TIME;
}

void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();

@@ -6153,7 +6153,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)

static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
{
	return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
	return env->prog->jit_requested &&
	       bpf_jit_supports_subprog_tailcalls();
}

static int check_map_func_compatibility(struct bpf_verifier_env *env,

@@ -7121,6 +7122,41 @@ static int check_get_func_ip(struct bpf_verifier_env *env)
	return -ENOTSUPP;
}

static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{
	return &env->insn_aux_data[env->insn_idx];
}

static bool loop_flag_is_zero(struct bpf_verifier_env *env)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[BPF_REG_4];
	bool reg_is_null = register_is_null(reg);

	if (reg_is_null)
		mark_chain_precision(env, BPF_REG_4);

	return reg_is_null;
}

static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
{
	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;

	if (!state->initialized) {
		state->initialized = 1;
		state->fit_for_inline = loop_flag_is_zero(env);
		state->callback_subprogno = subprogno;
		return;
	}

	if (!state->fit_for_inline)
		return;

	state->fit_for_inline = (loop_flag_is_zero(env) &&
				 state->callback_subprogno == subprogno);
}

static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			     int *insn_idx_p)
{

@@ -7273,6 +7309,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
		err = check_bpf_snprintf_call(env, regs);
		break;
	case BPF_FUNC_loop:
		update_loop_inline_state(env, meta.subprogno);
		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
					set_loop_callback_state);
		break;

@@ -7282,6 +7319,18 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
				reg_type_str(env, regs[BPF_REG_1].type));
			return -EACCES;
		}
		break;
	case BPF_FUNC_set_retval:
		if (env->prog->expected_attach_type == BPF_LSM_CGROUP) {
			if (!env->prog->aux->attach_func_proto->type) {
				/* Make sure programs that attach to void
				 * hooks don't try to modify return value.
				 */
				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
				return -EINVAL;
			}
		}
		break;
	}

	if (err)

@@ -7679,11 +7728,6 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
	return true;
}

static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{
	return &env->insn_aux_data[env->insn_idx];
}

enum {
	REASON_BOUNDS	= -1,
	REASON_TYPE	= -2,

@@ -9054,7 +9098,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			if (BPF_SRC(insn->code) != BPF_K ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose(env, "BPF_NEG uses reserved fields\n");

@@ -10381,11 +10425,21 @@ static int check_return_code(struct bpf_verifier_env *env)
	const bool is_subprog = frame->subprogno;

	/* LSM and struct_ops func-ptr's return type could be "void" */
	if (!is_subprog &&
	    (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
	     prog_type == BPF_PROG_TYPE_LSM) &&
	    !prog->aux->attach_func_proto->type)
		return 0;
	if (!is_subprog) {
		switch (prog_type) {
		case BPF_PROG_TYPE_LSM:
			if (prog->expected_attach_type == BPF_LSM_CGROUP)
				/* See below, can be 0 or 0-1 depending on hook. */
				break;
			fallthrough;
		case BPF_PROG_TYPE_STRUCT_OPS:
			if (!prog->aux->attach_func_proto->type)
				return 0;
			break;
		default:
			break;
		}
	}

	/* eBPF calling convention is such that R0 is used
	 * to return the value from eBPF program.

@@ -10476,6 +10530,22 @@ static int check_return_code(struct bpf_verifier_env *env)
	case BPF_PROG_TYPE_SK_LOOKUP:
		range = tnum_range(SK_DROP, SK_PASS);
		break;

	case BPF_PROG_TYPE_LSM:
		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
			/* Regular BPF_PROG_TYPE_LSM programs can return
			 * any value.
			 */
			return 0;
		}
		if (!env->prog->aux->attach_func_proto->type) {
			/* Make sure programs that attach to void
			 * hooks don't try to modify return value.
			 */
			range = tnum_range(1, 1);
		}
		break;

	case BPF_PROG_TYPE_EXT:
		/* freplace program can return anything as its return value
		 * depends on the to-be-replaced kernel func or bpf program.

@@ -10492,6 +10562,10 @@ static int check_return_code(struct bpf_verifier_env *env)

	if (!tnum_in(range, reg->var_off)) {
		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
		if (prog->expected_attach_type == BPF_LSM_CGROUP &&
		    prog_type == BPF_PROG_TYPE_LSM &&
		    !prog->aux->attach_func_proto->type)
			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
		return -EINVAL;
	}

@@ -14296,6 +14370,142 @@ patch_call_imm:
	return 0;
}

static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
					int position,
					s32 stack_base,
					u32 callback_subprogno,
					u32 *cnt)
{
	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
	int reg_loop_max = BPF_REG_6;
	int reg_loop_cnt = BPF_REG_7;
	int reg_loop_ctx = BPF_REG_8;

	struct bpf_prog *new_prog;
	u32 callback_start;
	u32 call_insn_offset;
	s32 callback_offset;

	/* This represents an inlined version of bpf_iter.c:bpf_loop,
	 * be careful to modify this code in sync.
	 */
	struct bpf_insn insn_buf[] = {
		/* Return error and jump to the end of the patch if
		 * expected number of iterations is too big.
		 */
		BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
		BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
		BPF_JMP_IMM(BPF_JA, 0, 0, 16),
		/* spill R6, R7, R8 to use these as loop vars */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
		/* initialize loop vars */
		BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
		BPF_MOV32_IMM(reg_loop_cnt, 0),
		BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
		/* loop header,
		 * if reg_loop_cnt >= reg_loop_max skip the loop body
		 */
		BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
		/* callback call,
		 * correct callback offset would be set after patching
		 */
		BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
		BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
		BPF_CALL_REL(0),
		/* increment loop counter */
		BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
		/* jump to loop header if callback returned 0 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
		/* return value of bpf_loop,
		 * set R0 to the number of iterations
		 */
		BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
		/* restore original values of R6, R7, R8 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
	};

	*cnt = ARRAY_SIZE(insn_buf);
	new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
	if (!new_prog)
		return new_prog;

	/* callback start is known only after patching */
	callback_start = env->subprog_info[callback_subprogno].start;
	/* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
	call_insn_offset = position + 12;
	callback_offset = callback_start - call_insn_offset - 1;
	new_prog->insnsi[call_insn_offset].imm = callback_offset;

	return new_prog;
}

static bool is_bpf_loop_call(struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
		insn->src_reg == 0 &&
		insn->imm == BPF_FUNC_loop;
}

/* For all sub-programs in the program (including main) check
 * insn_aux_data to see if there are bpf_loop calls that require
 * inlining. If such calls are found the calls are replaced with a
 * sequence of instructions produced by `inline_bpf_loop` function and
 * subprog stack_depth is increased by the size of 3 registers.
 * This stack space is used to spill values of the R6, R7, R8.  These
 * registers are used to store the loop bound, counter and context
 * variables.
 */
static int optimize_bpf_loop(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprogs = env->subprog_info;
	int i, cur_subprog = 0, cnt, delta = 0;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	u16 stack_depth = subprogs[cur_subprog].stack_depth;
	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
	u16 stack_depth_extra = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		struct bpf_loop_inline_state *inline_state =
			&env->insn_aux_data[i + delta].loop_inline_state;

		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
			struct bpf_prog *new_prog;

			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
			new_prog = inline_bpf_loop(env,
						   i + delta,
						   -(stack_depth + stack_depth_extra),
						   inline_state->callback_subprogno,
						   &cnt);
			if (!new_prog)
				return -ENOMEM;

			delta     += cnt - 1;
			env->prog  = new_prog;
			insn       = new_prog->insnsi + i + delta;
		}

		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
			subprogs[cur_subprog].stack_depth += stack_depth_extra;
			cur_subprog++;
			stack_depth = subprogs[cur_subprog].stack_depth;
			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
			stack_depth_extra = 0;
		}
	}

	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;

	return 0;
}

static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;

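As a sketch, here is a bpf_loop() call that satisfies these conditions — callback known statically, flags register constant zero — and would therefore be rewritten by optimize_bpf_loop(); the program shape is assumed for illustration, not taken from this diff:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	static long cb(__u32 i, void *ctx)
	{
		*(long *)ctx += i;
		return 0;	/* 0 = keep iterating, 1 = break early */
	}

	SEC("tc")
	int sum(struct __sk_buff *skb)
	{
		long acc = 0;

		/* The callback is statically known and flags (R4) is the
		 * constant 0, so loop_flag_is_zero() holds, the call is
		 * marked fit_for_inline, and the helper call is replaced
		 * with the insn_buf sequence from inline_bpf_loop() above.
		 */
		bpf_loop(100, cb, &acc, 0);
		return acc & 1;
	}

	char _license[] SEC("license") = "GPL";
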
@@ -14715,6 +14925,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
		fallthrough;
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
	case BPF_LSM_CGROUP:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (!btf_type_is_func(t)) {

@@ -15033,6 +15244,9 @@ skip_full_check:
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (ret == 0)
		ret = optimize_bpf_loop(env);

	if (is_priv) {
		if (ret == 0)
			opt_hard_wire_dead_code_branches(env);

@@ -1343,6 +1343,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
	int size, esize;
	int rctx;

#ifdef CONFIG_BPF_EVENTS
	if (bpf_prog_array_valid(call)) {
		u32 ret;

@@ -1350,6 +1351,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
		if (!ret)
			return;
	}
#endif /* CONFIG_BPF_EVENTS */

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

@@ -14733,9 +14733,9 @@ static struct skb_segment_test skb_segment_tests[] __initconst = {
		.build_skb = build_test_skb_linear_no_head_frag,
		.features = NETIF_F_SG | NETIF_F_FRAGLIST |
			    NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO |
			    NETIF_F_LLTX_BIT | NETIF_F_GRO |
			    NETIF_F_LLTX | NETIF_F_GRO |
			    NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_STAG_TX_BIT
			    NETIF_F_HW_VLAN_STAG_TX
	}
};

@@ -5012,8 +5012,8 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.arg1_type      = ARG_PTR_TO_CTX,
};

static int _bpf_setsockopt(struct sock *sk, int level, int optname,
			   char *optval, int optlen)
static int __bpf_setsockopt(struct sock *sk, int level, int optname,
			    char *optval, int optlen)
{
	char devname[IFNAMSIZ];
	int val, valbool;

@@ -5024,8 +5024,6 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
	if (!sk_fullsock(sk))
		return -EINVAL;

	sock_owned_by_me(sk);

	if (level == SOL_SOCKET) {
		if (optlen != sizeof(int) && optname != SO_BINDTODEVICE)
			return -EINVAL;

@@ -5258,14 +5256,20 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
	return ret;
}

static int _bpf_getsockopt(struct sock *sk, int level, int optname,
static int _bpf_setsockopt(struct sock *sk, int level, int optname,
			   char *optval, int optlen)
{
	if (sk_fullsock(sk))
		sock_owned_by_me(sk);
	return __bpf_setsockopt(sk, level, optname, optval, optlen);
}

static int __bpf_getsockopt(struct sock *sk, int level, int optname,
			    char *optval, int optlen)
{
	if (!sk_fullsock(sk))
		goto err_clear;

	sock_owned_by_me(sk);

	if (level == SOL_SOCKET) {
		if (optlen != sizeof(int))
			goto err_clear;

@@ -5360,6 +5364,14 @@ err_clear:
	return -EINVAL;
}

static int _bpf_getsockopt(struct sock *sk, int level, int optname,
			   char *optval, int optlen)
{
	if (sk_fullsock(sk))
		sock_owned_by_me(sk);
	return __bpf_getsockopt(sk, level, optname, optval, optlen);
}

BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level,
	   int, optname, char *, optval, int, optlen)
{

@@ -5400,6 +5412,40 @@ const struct bpf_func_proto bpf_sk_getsockopt_proto = {
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(bpf_unlocked_sk_setsockopt, struct sock *, sk, int, level,
	   int, optname, char *, optval, int, optlen)
{
	return __bpf_setsockopt(sk, level, optname, optval, optlen);
}

const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto = {
	.func		= bpf_unlocked_sk_setsockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(bpf_unlocked_sk_getsockopt, struct sock *, sk, int, level,
	   int, optname, char *, optval, int, optlen)
{
	return __bpf_getsockopt(sk, level, optname, optval, optlen);
}

const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto = {
	.func		= bpf_unlocked_sk_getsockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
	   int, level, int, optname, char *, optval, int, optlen)
{

@@ -6470,8 +6516,8 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
		 u64 flags)
{
	struct sock *sk = NULL;
	u8 family = AF_UNSPEC;
	struct net *net;
	u8 family;
	int sdif;

	if (len == sizeof(tuple->ipv4))

@@ -6481,8 +6527,7 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
	else
		return NULL;

	if (unlikely(family == AF_UNSPEC || flags ||
		     !((s32)netns_id < 0 || netns_id <= S32_MAX)))
	if (unlikely(flags || !((s32)netns_id < 0 || netns_id <= S32_MAX)))
		goto out;

	if (family == AF_INET)

@@ -497,23 +497,27 @@ bool sk_msg_is_readable(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
static struct sk_msg *alloc_sk_msg(void)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
	return alloc_sk_msg();
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,

@@ -590,13 +594,12 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sk_msg *msg = alloc_sk_msg();
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)

@@ -1165,21 +1168,14 @@ static void sk_psock_done_strp(struct sk_psock *psock)
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = orig_len;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}
	skb_get(skb);

	rcu_read_lock();
	psock = sk_psock(sk);

@@ -1192,12 +1188,10 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;

@@ -1209,16 +1203,10 @@ out:
static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
	sock->ops->read_skb(sk, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)

@@ -1578,7 +1578,7 @@ void sock_map_destroy(struct sock *sk)
	saved_destroy = psock->saved_destroy;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	sk_psock_stop(psock, true);
	sk_psock_stop(psock, false);
	sk_psock_put(sk, psock);
	saved_destroy(sk);
}

@@ -1040,6 +1040,7 @@ const struct proto_ops inet_stream_ops = {
	.sendpage	   = inet_sendpage,
	.splice_read	   = tcp_splice_read,
	.read_sock	   = tcp_read_sock,
	.read_skb	   = tcp_read_skb,
	.sendmsg_locked    = tcp_sendmsg_locked,
	.sendpage_locked   = tcp_sendpage_locked,
	.peek_len	   = tcp_peek_len,

@@ -1067,7 +1068,7 @@ const struct proto_ops inet_dgram_ops = {
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.read_sock	   = udp_read_sock,
	.read_skb	   = udp_read_skb,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,

@@ -14,18 +14,6 @@
/* "extern" is to avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

@@ -51,18 +39,6 @@ static int bpf_tcp_ca_init(struct btf *btf)
	return 0;
}

static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

@@ -111,6 +87,12 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;

@@ -240,7 +222,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;

@@ -262,14 +243,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

@@ -1734,6 +1734,50 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
}
EXPORT_SYMBOL(tcp_read_sock);

int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	struct sk_buff *skb;
	int copied = 0;
	u32 offset;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;

	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		int used;

		__skb_unlink(skb, &sk->sk_receive_queue);
		used = recv_actor(sk, skb);
		if (used <= 0) {
			if (!copied)
				copied = used;
			break;
		}
		seq += used;
		copied += used;

		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
			consume_skb(skb);
			++seq;
			break;
		}
		consume_skb(skb);
		break;
	}
	WRITE_ONCE(tp->copied_seq, seq);

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);

	return copied;
}
EXPORT_SYMBOL(tcp_read_skb);

int tcp_peek_len(struct socket *sock)
{
	return tcp_inq(sock->sk);

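tcp_read_skb() hands each queued skb to a recv_actor with the new, simpler read_skb signature; the udp and unix conversions below follow the same shape. A sketch of the contract (the typedef sits in a header hunk not shown in this excerpt; the sample actor is illustrative):

	/* include/linux/net.h (not part of this excerpt): */
	typedef int (*skb_read_actor_t)(struct sock *sk, struct sk_buff *skb);

	/* An actor consumes the skb and reports bytes used; <= 0 stops the walk: */
	static int demo_recv_actor(struct sock *sk, struct sk_buff *skb)
	{
		return skb->len;	/* claim the whole skb */
	}
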
@@ -1797,8 +1797,7 @@ busy_check:
}
EXPORT_SYMBOL(__skb_recv_udp);

int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	int copied = 0;

@@ -1820,7 +1819,8 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
			continue;
		}

		used = recv_actor(desc, skb, 0, skb->len);
		WARN_ON(!skb_set_owner_sk_safe(skb, sk));
		used = recv_actor(sk, skb);
		if (used <= 0) {
			if (!copied)
				copied = used;

@@ -1831,13 +1831,12 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
		}

		kfree_skb(skb);
		if (!desc->count)
			break;
		break;
	}

	return copied;
}
EXPORT_SYMBOL(udp_read_sock);
EXPORT_SYMBOL(udp_read_skb);

/*
 *	This should be easy, if there is something there we

@@ -702,6 +702,7 @@ const struct proto_ops inet6_stream_ops = {
	.sendpage_locked   = tcp_sendpage_locked,
	.splice_read	   = tcp_splice_read,
	.read_sock	   = tcp_read_sock,
	.read_skb	   = tcp_read_skb,
	.peek_len	   = tcp_peek_len,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,

@@ -727,7 +728,7 @@ const struct proto_ops inet6_dgram_ops = {
	.getsockopt	   = sock_common_getsockopt,	/* ok		*/
	.sendmsg	   = inet6_sendmsg,		/* retpoline's sake */
	.recvmsg	   = inet6_recvmsg,		/* retpoline's sake */
	.read_sock	   = udp_read_sock,
	.read_skb	   = udp_read_skb,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
	.set_peek_off	   = sk_set_peek_off,

@@ -763,10 +763,8 @@ static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
			  sk_read_actor_t recv_actor);
static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
				 sk_read_actor_t recv_actor);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);

@@ -820,7 +818,7 @@ static const struct proto_ops unix_stream_ops = {
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.read_sock =	unix_stream_read_sock,
	.read_skb =	unix_stream_read_skb,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,

@@ -845,7 +843,7 @@ static const struct proto_ops unix_dgram_ops = {
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_dgram_sendmsg,
	.read_sock =	unix_read_sock,
	.read_skb =	unix_read_skb,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,

@@ -2506,8 +2504,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t si
	return __unix_dgram_recvmsg(sk, msg, size, flags);
}

static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
			  sk_read_actor_t recv_actor)
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	int copied = 0;

@@ -2522,7 +2519,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
		if (!skb)
			return err;

		used = recv_actor(desc, skb, 0, skb->len);
		used = recv_actor(sk, skb);
		if (used <= 0) {
			if (!copied)
				copied = used;

@@ -2533,8 +2530,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
		}

		kfree_skb(skb);
		if (!desc->count)
			break;
		break;
	}

	return copied;

@@ -2669,13 +2665,12 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
}
#endif

static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
				 sk_read_actor_t recv_actor)
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		return -ENOTCONN;

	return unix_read_sock(sk, desc, recv_actor);
	return unix_read_skb(sk, recv_actor);
}

static int unix_stream_read_generic(struct unix_stream_read_state *state,

@@ -45,9 +45,6 @@ tprogs-y += xdp_rxq_info
tprogs-y += syscall_tp
tprogs-y += cpustat
tprogs-y += xdp_adjust_tail
tprogs-y += xdpsock
tprogs-y += xdpsock_ctrl_proc
tprogs-y += xsk_fwd
tprogs-y += xdp_fwd
tprogs-y += task_fd_query
tprogs-y += xdp_sample_pkts

@@ -109,9 +106,6 @@ xdp_rxq_info-objs := xdp_rxq_info_user.o
syscall_tp-objs := syscall_tp_user.o
cpustat-objs := cpustat_user.o
xdp_adjust_tail-objs := xdp_adjust_tail_user.o
xdpsock-objs := xdpsock_user.o
xdpsock_ctrl_proc-objs := xdpsock_ctrl_proc.o
xsk_fwd-objs := xsk_fwd.o
xdp_fwd-objs := xdp_fwd_user.o
task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
xdp_sample_pkts-objs := xdp_sample_pkts_user.o

@@ -179,7 +173,6 @@ always-y += xdp_sample_pkts_kern.o
always-y += ibumad_kern.o
always-y += hbm_out_kern.o
always-y += hbm_edt_kern.o
always-y += xdpsock_kern.o

ifeq ($(ARCH), arm)
# Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux

@@ -224,8 +217,6 @@ TPROGLDLIBS_tracex4		+= -lrt
TPROGLDLIBS_trace_output	+= -lrt
TPROGLDLIBS_map_perf_test	+= -lrt
TPROGLDLIBS_test_overhead	+= -lrt
TPROGLDLIBS_xdpsock		+= -pthread -lcap
TPROGLDLIBS_xsk_fwd		+= -pthread

# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
#  make M=samples/bpf LLC=~/git/llvm-project/llvm/build/bin/llc CLANG=~/git/llvm-project/llvm/build/bin/clang

@@ -39,11 +39,13 @@ static int parse_ipv6(void *data, u64 nh_off, void *data_end)
	return ip6h->nexthdr;
}

SEC("xdp1")
#define XDPBUFSIZE	64
SEC("xdp.frags")
int xdp_prog1(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	__u8 pkt[XDPBUFSIZE] = {};
	void *data_end = &pkt[XDPBUFSIZE-1];
	void *data = pkt;
	struct ethhdr *eth = data;
	int rc = XDP_DROP;
	long *value;

@@ -51,6 +53,9 @@ int xdp_prog1(struct xdp_md *ctx)
	u64 nh_off;
	u32 ipproto;

	if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
		return rc;

	nh_off = sizeof(*eth);
	if (data + nh_off > data_end)
		return rc;

@@ -55,11 +55,13 @@ static int parse_ipv6(void *data, u64 nh_off, void *data_end)
	return ip6h->nexthdr;
}

SEC("xdp1")
#define XDPBUFSIZE	64
SEC("xdp.frags")
int xdp_prog1(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	__u8 pkt[XDPBUFSIZE] = {};
	void *data_end = &pkt[XDPBUFSIZE-1];
	void *data = pkt;
	struct ethhdr *eth = data;
	int rc = XDP_DROP;
	long *value;

@@ -67,6 +69,9 @@ int xdp_prog1(struct xdp_md *ctx)
	u64 nh_off;
	u32 ipproto;

	if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
		return rc;

	nh_off = sizeof(*eth);
	if (data + nh_off > data_end)
		return rc;

@@ -212,7 +212,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp)
	return XDP_TX;
}

SEC("xdp_tx_iptunnel")
SEC("xdp.frags")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
	void *data_end = (void *)(long)xdp->data_end;

@@ -1,19 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright(c) 2019 Intel Corporation.
 */

#ifndef XDPSOCK_H_
#define XDPSOCK_H_

#define MAX_SOCKS 4

#define SOCKET_NAME "sock_cal_bpf_fd"
#define MAX_NUM_OF_CLIENTS 10

#define CLOSE_CONN  1

typedef __u64 u64;
typedef __u32 u32;

#endif /* XDPSOCK_H */

@@ -1,190 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <net/if.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/xsk.h>
#include "xdpsock.h"

/* libbpf APIs for AF_XDP are deprecated starting from v0.7 */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

static const char *opt_if = "";

static struct option long_options[] = {
	{"interface", required_argument, 0, 'i'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -i, --interface=n	 Run on interface n\n"
		"\n";
	fprintf(stderr, "%s\n", str);

	exit(0);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "i:",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'i':
			opt_if = optarg;
			break;
		default:
			usage(basename(argv[0]));
		}
	}
}

static int send_xsks_map_fd(int sock, int fd)
{
	char cmsgbuf[CMSG_SPACE(sizeof(int))];
	struct msghdr msg;
	struct iovec iov;
	int value = 0;

	if (fd == -1) {
		fprintf(stderr, "Incorrect fd = %d\n", fd);
		return -1;
	}
	iov.iov_base = &value;
	iov.iov_len = sizeof(int);

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_flags = 0;
	msg.msg_control = cmsgbuf;
	msg.msg_controllen = CMSG_LEN(sizeof(int));

	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	*(int *)CMSG_DATA(cmsg) = fd;
	int ret = sendmsg(sock, &msg, 0);

	if (ret == -1) {
		fprintf(stderr, "Sendmsg failed with %s", strerror(errno));
		return -errno;
	}

	return ret;
}

int
main(int argc, char **argv)
{
	struct sockaddr_un server;
	int listening = 1;
	int rval, msgsock;
	int ifindex = 0;
	int flag = 1;
	int cmd = 0;
	int sock;
	int err;
	int xsks_map_fd;

	parse_command_line(argc, argv);

	ifindex = if_nametoindex(opt_if);
	if (ifindex == 0) {
		fprintf(stderr, "Unable to get ifindex for Interface %s. Reason:%s",
			opt_if, strerror(errno));
		return -errno;
	}

	sock = socket(AF_UNIX, SOCK_STREAM, 0);
	if (sock < 0) {
		fprintf(stderr, "Opening socket stream failed: %s", strerror(errno));
		return -errno;
	}

	server.sun_family = AF_UNIX;
	strcpy(server.sun_path, SOCKET_NAME);

	setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(int));

	if (bind(sock, (struct sockaddr *)&server, sizeof(struct sockaddr_un))) {
		fprintf(stderr, "Binding to socket stream failed: %s", strerror(errno));
		return -errno;
	}

	listen(sock, MAX_NUM_OF_CLIENTS);

	err = xsk_setup_xdp_prog(ifindex, &xsks_map_fd);
	if (err) {
		fprintf(stderr, "Setup of xdp program failed\n");
		goto close_sock;
	}

	while (listening) {
		msgsock = accept(sock, 0, 0);
		if (msgsock == -1) {
			fprintf(stderr, "Error accepting connection: %s", strerror(errno));
			err = -errno;
			goto close_sock;
		}
		err = send_xsks_map_fd(msgsock, xsks_map_fd);
		if (err <= 0) {
			fprintf(stderr, "Error %d sending xsks_map_fd\n", err);
			goto cleanup;
		}
		do {
			rval = read(msgsock, &cmd, sizeof(int));
			if (rval < 0) {
				fprintf(stderr, "Error reading stream message");
			} else {
				if (cmd != CLOSE_CONN)
					fprintf(stderr, "Recv unknown cmd = %d\n", cmd);
				listening = 0;
				break;
			}
		} while (rval > 0);
	}
	close(msgsock);
	close(sock);
	unlink(SOCKET_NAME);

	/* Unset fd for given ifindex */
	err = bpf_xdp_detach(ifindex, 0, NULL);
	if (err) {
		fprintf(stderr, "Error when unsetting bpf prog_fd for ifindex(%d)\n", ifindex);
		return err;
	}

	return 0;

cleanup:
	close(msgsock);
close_sock:
	close(sock);
	unlink(SOCKET_NAME);
	return err;
}

@@ -1,24 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "xdpsock.h"

/* This XDP program is only needed for the XDP_SHARED_UMEM mode.
 * If you do not use this mode, libbpf can supply an XDP program for you.
 */

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_SOCKS);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} xsks_map SEC(".maps");

static unsigned int rr;

SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
{
	rr = (rr + 1) & (MAX_SOCKS - 1);

	return bpf_redirect_map(&xsks_map, rr, XDP_DROP);
}

File diff suppressed because it is too large
File diff suppressed because it is too large

@@ -24,9 +24,11 @@ FEATURE COMMANDS
================

|	**bpftool** **feature probe** [*COMPONENT*] [**full**] [**unprivileged**] [**macros** [**prefix** *PREFIX*]]
|	**bpftool** **feature list_builtins** *GROUP*
|	**bpftool** **feature help**
|
|	*COMPONENT* := { **kernel** | **dev** *NAME* }
|	*GROUP* := { **prog_types** | **map_types** | **attach_types** | **link_types** | **helpers** }

DESCRIPTION
===========

@@ -70,6 +72,16 @@ DESCRIPTION
		  The keywords **full**, **macros** and **prefix** have the
		  same role as when probing the kernel.

	**bpftool feature list_builtins** *GROUP*
		  List items known to bpftool. These can be BPF program types
		  (**prog_types**), BPF map types (**map_types**), attach types
		  (**attach_types**), link types (**link_types**), or BPF helper
		  functions (**helpers**). The command does not probe the system, but
		  simply lists the elements that bpftool knows from compilation time,
		  as provided from libbpf (for all object types) or from the BPF UAPI
		  header (list of helpers). This can be used in scripts to iterate over
		  BPF types or helpers.
	**bpftool feature help**
		  Print short help message.

@@ -93,10 +93,8 @@ INSTALL ?= install
RM ?= rm -f

FEATURE_USER = .bpftool
FEATURE_TESTS = libbfd disassembler-four-args zlib libcap \
	clang-bpf-co-re
FEATURE_DISPLAY = libbfd disassembler-four-args zlib libcap \
	clang-bpf-co-re
FEATURE_TESTS = libbfd disassembler-four-args libcap clang-bpf-co-re
FEATURE_DISPLAY = libbfd disassembler-four-args libcap clang-bpf-co-re

check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall

@@ -204,11 +202,6 @@ $(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@

$(OUTPUT)feature.o:
ifneq ($(feature-zlib), 1)
	$(error "No zlib found")
endif

$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
	$(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@

@@ -703,15 +703,8 @@ _bpftool()
				return 0
				;;
			type)
				local BPFTOOL_MAP_CREATE_TYPES='hash array \
					prog_array perf_event_array percpu_hash \
					percpu_array stack_trace cgroup_array lru_hash \
					lru_percpu_hash lpm_trie array_of_maps \
					hash_of_maps devmap devmap_hash sockmap cpumap \
					xskmap sockhash cgroup_storage reuseport_sockarray \
					percpu_cgroup_storage queue stack sk_storage \
					struct_ops ringbuf inode_storage task_storage \
					bloom_filter'
				local BPFTOOL_MAP_CREATE_TYPES="$(bpftool feature list_builtins map_types 2>/dev/null | \
					grep -v '^unspec$')"
				COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) )
				return 0
				;;

@@ -1039,14 +1032,8 @@ _bpftool()
				return 0
				;;
			attach|detach)
				local BPFTOOL_CGROUP_ATTACH_TYPES='cgroup_inet_ingress cgroup_inet_egress \
					cgroup_inet_sock_create cgroup_sock_ops cgroup_device cgroup_inet4_bind \
					cgroup_inet6_bind cgroup_inet4_post_bind cgroup_inet6_post_bind \
					cgroup_inet4_connect cgroup_inet6_connect cgroup_inet4_getpeername \
					cgroup_inet6_getpeername cgroup_inet4_getsockname cgroup_inet6_getsockname \
					cgroup_udp4_sendmsg cgroup_udp6_sendmsg cgroup_udp4_recvmsg \
					cgroup_udp6_recvmsg cgroup_sysctl cgroup_getsockopt cgroup_setsockopt \
					cgroup_inet_sock_release'
				local BPFTOOL_CGROUP_ATTACH_TYPES="$(bpftool feature list_builtins attach_types 2>/dev/null | \
					grep '^cgroup_')"
				local ATTACH_FLAGS='multi override'
				local PROG_TYPE='id pinned tag name'
				# Check for $prev = $command first

@@ -1175,9 +1162,14 @@ _bpftool()
					_bpftool_once_attr 'full unprivileged'
					return 0
					;;
				list_builtins)
					[[ $prev != "$command" ]] && return 0
					COMPREPLY=( $( compgen -W 'prog_types map_types \
						attach_types link_types helpers' -- "$cur" ) )
					;;
				*)
					[[ $prev == $object ]] && \
						COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
						COMPREPLY=( $( compgen -W 'help list_builtins probe' -- "$cur" ) )
					;;
			esac
			;;

@@ -15,6 +15,7 @@
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/btf.h>

#include "main.h"

@@ -36,6 +37,8 @@
	"                        cgroup_inet_sock_release }"

static unsigned int query_flags;
static struct btf *btf_vmlinux;
static __u32 btf_vmlinux_id;

static enum bpf_attach_type parse_attach_type(const char *str)
{

@@ -64,11 +67,38 @@ static enum bpf_attach_type parse_attach_type(const char *str)
	return __MAX_BPF_ATTACH_TYPE;
}

static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id)
{
	struct bpf_btf_info btf_info = {};
	__u32 btf_len = sizeof(btf_info);
	char name[16] = {};
	int err;
	int fd;

	btf_info.name = ptr_to_u64(name);
	btf_info.name_len = sizeof(name);

	fd = bpf_btf_get_fd_by_id(attach_btf_obj_id);
	if (fd < 0)
		return;

	err = bpf_obj_get_info_by_fd(fd, &btf_info, &btf_len);
	if (err)
		goto out;

	if (btf_info.kernel_btf && strncmp(name, "vmlinux", sizeof(name)) == 0)
		btf_vmlinux_id = btf_info.id;

out:
	close(fd);
}

static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
			 const char *attach_flags_str,
			 int level)
{
	char prog_name[MAX_PROG_FULL_NAME];
	const char *attach_btf_name = NULL;
	struct bpf_prog_info info = {};
	const char *attach_type_str;
	__u32 info_len = sizeof(info);

@@ -84,6 +114,20 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
	}

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);

	if (btf_vmlinux) {
		if (!btf_vmlinux_id)
			guess_vmlinux_btf_id(info.attach_btf_obj_id);

		if (btf_vmlinux_id == info.attach_btf_obj_id &&
		    info.attach_btf_id < btf__type_cnt(btf_vmlinux)) {
			const struct btf_type *t =
				btf__type_by_id(btf_vmlinux, info.attach_btf_id);
			attach_btf_name =
				btf__name_by_offset(btf_vmlinux, t->name_off);
		}
	}

	get_prog_full_name(&info, prog_fd, prog_name, sizeof(prog_name));
	if (json_output) {
		jsonw_start_object(json_wtr);

@@ -95,6 +139,10 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
		jsonw_string_field(json_wtr, "attach_flags",
				   attach_flags_str);
		jsonw_string_field(json_wtr, "name", prog_name);
		if (attach_btf_name)
			jsonw_string_field(json_wtr, "attach_btf_name", attach_btf_name);
		jsonw_uint_field(json_wtr, "attach_btf_obj_id", info.attach_btf_obj_id);
		jsonw_uint_field(json_wtr, "attach_btf_id", info.attach_btf_id);
		jsonw_end_object(json_wtr);
	} else {
		printf("%s%-8u ", level ? "    " : "", info.id);

@@ -102,7 +150,13 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
			printf("%-15s", attach_type_str);
		else
			printf("type %-10u", attach_type);
		printf(" %-15s %-15s\n", attach_flags_str, prog_name);
		printf(" %-15s %-15s", attach_flags_str, prog_name);
		if (attach_btf_name)
			printf(" %-15s", attach_btf_name);
		else if (info.attach_btf_id)
			printf(" attach_btf_obj_id=%d attach_btf_id=%d",
			       info.attach_btf_obj_id, info.attach_btf_id);
		printf("\n");
	}

	close(prog_fd);

@@ -144,40 +198,49 @@ static int cgroup_has_attached_progs(int cgroup_fd)
static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
				   int level)
{
	LIBBPF_OPTS(bpf_prog_query_opts, p);
	__u32 prog_attach_flags[1024] = {0};
	const char *attach_flags_str;
	__u32 prog_ids[1024] = {0};
	__u32 prog_cnt, iter;
	__u32 attach_flags;
	char buf[32];
	__u32 iter;
	int ret;

||||
prog_cnt = ARRAY_SIZE(prog_ids);
|
||||
ret = bpf_prog_query(cgroup_fd, type, query_flags, &attach_flags,
|
||||
prog_ids, &prog_cnt);
|
||||
p.query_flags = query_flags;
|
||||
p.prog_cnt = ARRAY_SIZE(prog_ids);
|
||||
p.prog_ids = prog_ids;
|
||||
p.prog_attach_flags = prog_attach_flags;
|
||||
|
||||
ret = bpf_prog_query_opts(cgroup_fd, type, &p);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (prog_cnt == 0)
|
||||
if (p.prog_cnt == 0)
|
||||
return 0;
|
||||
|
||||
switch (attach_flags) {
|
||||
case BPF_F_ALLOW_MULTI:
|
||||
attach_flags_str = "multi";
|
||||
break;
|
||||
case BPF_F_ALLOW_OVERRIDE:
|
||||
attach_flags_str = "override";
|
||||
break;
|
||||
case 0:
|
||||
attach_flags_str = "";
|
||||
break;
|
||||
default:
|
||||
snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
|
||||
attach_flags_str = buf;
|
||||
}
|
||||
for (iter = 0; iter < p.prog_cnt; iter++) {
|
||||
__u32 attach_flags;
|
||||
|
||||
attach_flags = prog_attach_flags[iter] ?: p.attach_flags;
|
||||
|
||||
switch (attach_flags) {
|
||||
case BPF_F_ALLOW_MULTI:
|
||||
attach_flags_str = "multi";
|
||||
break;
|
||||
case BPF_F_ALLOW_OVERRIDE:
|
||||
attach_flags_str = "override";
|
||||
break;
|
||||
case 0:
|
||||
attach_flags_str = "";
|
||||
break;
|
||||
default:
|
||||
snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
|
||||
attach_flags_str = buf;
|
||||
}
|
||||
|
||||
for (iter = 0; iter < prog_cnt; iter++)
|
||||
show_bpf_prog(prog_ids[iter], type,
|
||||
attach_flags_str, level);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -233,6 +296,7 @@ static int do_show(int argc, char **argv)
|
|||
printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
|
||||
"AttachFlags", "Name");
|
||||
|
||||
btf_vmlinux = libbpf_find_kernel_btf();
|
||||
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
|
||||
/*
|
||||
* Not all attach types may be supported, so it's expected,
|
||||
|
@ -296,6 +360,7 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
|
|||
printf("%s\n", fpath);
|
||||
}
|
||||
|
||||
btf_vmlinux = libbpf_find_kernel_btf();
|
||||
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
|
||||
show_attached_bpf_progs(cgroup_fd, type, ftw->level);
|
||||
|
||||
|
|
|
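As a side note, here is a minimal sketch (not part of the patch) of how an external tool could consume the bpf_prog_query_opts() API that the cgroup listing above relies on; the cgroup path, the array size and the abbreviated error handling are illustrative assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <bpf/bpf.h>

int main(void)
{
	__u32 prog_ids[64] = {}, prog_attach_flags[64] = {};
	LIBBPF_OPTS(bpf_prog_query_opts, p,
		    .prog_ids = prog_ids,
		    .prog_cnt = 64,
		    .prog_attach_flags = prog_attach_flags);
	__u32 i;
	int cgroup_fd;

	cgroup_fd = open("/sys/fs/cgroup", O_RDONLY); /* placeholder cgroup */
	if (cgroup_fd < 0)
		return 1;

	if (bpf_prog_query_opts(cgroup_fd, BPF_CGROUP_INET_INGRESS, &p))
		return 1;

	for (i = 0; i < p.prog_cnt; i++)
		/* per-program flags, falling back to the aggregate value */
		printf("prog %u flags %x\n", prog_ids[i],
		       prog_attach_flags[i] ?: p.attach_flags);
	return 0;
}
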
@@ -13,14 +13,17 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
-#include <linux/limits.h>
-#include <linux/magic.h>
 #include <net/if.h>
 #include <sys/mount.h>
+#include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/vfs.h>
 
 #include <linux/filter.h>
+#include <linux/limits.h>
+#include <linux/magic.h>
+#include <linux/unistd.h>
 
 #include <bpf/bpf.h>
 #include <bpf/hashmap.h>
 #include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
@@ -73,11 +76,73 @@ static bool is_bpffs(char *path)
 	return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
 }
 
+/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
+ * memcg-based memory accounting for BPF maps and programs. This was done in
+ * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
+ * accounting'"), in Linux 5.11.
+ *
+ * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
+ * so by checking for the availability of a given BPF helper and this has
+ * failed on some kernels with backports in the past, see commit 6b4384ff1088
+ * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
+ * Instead, we can probe by lowering the process-based rlimit to 0, trying to
+ * load a BPF object, and resetting the rlimit. If the load succeeds then
+ * memcg-based accounting is supported.
+ *
+ * This would be too dangerous to do in the library, because multithreaded
+ * applications might attempt to load items while the rlimit is at 0. Given
+ * that bpftool is single-threaded, this is fine to do here.
+ */
+static bool known_to_need_rlimit(void)
+{
+	struct rlimit rlim_init, rlim_cur_zero = {};
+	struct bpf_insn insns[] = {
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	};
+	size_t insn_cnt = ARRAY_SIZE(insns);
+	union bpf_attr attr;
+	int prog_fd, err;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+	attr.insns = ptr_to_u64(insns);
+	attr.insn_cnt = insn_cnt;
+	attr.license = ptr_to_u64("GPL");
+
+	if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
+		return false;
+
+	/* Drop the soft limit to zero. We maintain the hard limit to its
+	 * current value, because lowering it would be a permanent operation
+	 * for unprivileged users.
+	 */
+	rlim_cur_zero.rlim_max = rlim_init.rlim_max;
+	if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
+		return false;
+
+	/* Do not use bpf_prog_load() from libbpf here, because it calls
+	 * bump_rlimit_memlock(), interfering with the current probe.
+	 */
+	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+	err = errno;
+
+	/* reset soft rlimit to its initial value */
+	setrlimit(RLIMIT_MEMLOCK, &rlim_init);
+
+	if (prog_fd < 0)
+		return err == EPERM;
+
+	close(prog_fd);
+	return false;
+}
+
 void set_max_rlimit(void)
 {
 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
 
-	setrlimit(RLIMIT_MEMLOCK, &rinf);
+	if (known_to_need_rlimit())
+		setrlimit(RLIMIT_MEMLOCK, &rinf);
 }
 
 static int
@@ -251,6 +316,7 @@ const char *get_fd_type_name(enum bpf_obj_type type)
 		[BPF_OBJ_UNKNOWN]	= "unknown",
 		[BPF_OBJ_PROG]		= "prog",
 		[BPF_OBJ_MAP]		= "map",
+		[BPF_OBJ_LINK]		= "link",
 	};
 
 	if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])

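For the pre-5.11 fallback path that known_to_need_rlimit() detects, the caller-side pattern is simply to raise RLIMIT_MEMLOCK before creating BPF objects. A hedged, self-contained sketch of that pattern; the map parameters are arbitrary:

#include <sys/resource.h>
#include <bpf/bpf.h>

/* On kernels before memcg-based accounting, BPF object creation is
 * charged against RLIMIT_MEMLOCK, so loaders typically raise it first. */
static void bump_memlock_rlimit(void)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	/* best effort; fails with EPERM for unprivileged processes */
	setrlimit(RLIMIT_MEMLOCK, &rinf);
}

int main(void)
{
	bump_memlock_rlimit();

	/* map creation is now not capped by the memlock limit */
	return bpf_map_create(BPF_MAP_TYPE_ARRAY, "probe_map",
			      sizeof(int), sizeof(long), 1, NULL) < 0;
}
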
@@ -1258,6 +1258,58 @@ exit_close_json:
 	return 0;
 }
 
+static const char *get_helper_name(unsigned int id)
+{
+	if (id >= ARRAY_SIZE(helper_name))
+		return NULL;
+
+	return helper_name[id];
+}
+
+static int do_list_builtins(int argc, char **argv)
+{
+	const char *(*get_name)(unsigned int id);
+	unsigned int id = 0;
+
+	if (argc < 1)
+		usage();
+
+	if (is_prefix(*argv, "prog_types")) {
+		get_name = (const char *(*)(unsigned int))libbpf_bpf_prog_type_str;
+	} else if (is_prefix(*argv, "map_types")) {
+		get_name = (const char *(*)(unsigned int))libbpf_bpf_map_type_str;
+	} else if (is_prefix(*argv, "attach_types")) {
+		get_name = (const char *(*)(unsigned int))libbpf_bpf_attach_type_str;
+	} else if (is_prefix(*argv, "link_types")) {
+		get_name = (const char *(*)(unsigned int))libbpf_bpf_link_type_str;
+	} else if (is_prefix(*argv, "helpers")) {
+		get_name = get_helper_name;
+	} else {
+		p_err("expected 'prog_types', 'map_types', 'attach_types', 'link_types' or 'helpers', got: %s", *argv);
+		return -1;
+	}
+
+	if (json_output)
+		jsonw_start_array(json_wtr);	/* root array */
+
+	while (true) {
+		const char *name;
+
+		name = get_name(id++);
+		if (!name)
+			break;
+		if (json_output)
+			jsonw_string(json_wtr, name);
+		else
+			printf("%s\n", name);
+	}
+
+	if (json_output)
+		jsonw_end_array(json_wtr);	/* root array */
+
+	return 0;
+}
+
 static int do_help(int argc, char **argv)
 {
 	if (json_output) {
@@ -1267,9 +1319,11 @@ static int do_help(int argc, char **argv)
 
 	fprintf(stderr,
 		"Usage: %1$s %2$s probe [COMPONENT] [full] [unprivileged] [macros [prefix PREFIX]]\n"
+		"       %1$s %2$s list_builtins GROUP\n"
 		"       %1$s %2$s help\n"
 		"\n"
 		"       COMPONENT := { kernel | dev NAME }\n"
+		"       GROUP := { prog_types | map_types | attach_types | link_types | helpers }\n"
 		"       " HELP_SPEC_OPTIONS " }\n"
 		"",
 		bin_name, argv[-2]);
@@ -1278,8 +1332,9 @@ static int do_help(int argc, char **argv)
 }
 
 static const struct cmd cmds[] = {
-	{ "probe",	do_probe },
-	{ "help",	do_help },
+	{ "probe",		do_probe },
+	{ "list_builtins",	do_list_builtins },
+	{ "help",		do_help },
 	{ 0 }
 };

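The do_list_builtins() loop above works because the libbpf_bpf_*_str() helpers return NULL past the last known value. A standalone sketch of the same enumeration, assuming only a linked libbpf:

#include <stdio.h>
#include <bpf/libbpf.h>

/* Print every map type name libbpf knows about, the same way
 * "bpftool feature list_builtins map_types" walks the table. */
int main(void)
{
	const char *name;
	unsigned int id;

	for (id = 0; (name = libbpf_bpf_map_type_str(id)); id++)
		printf("%s\n", name);
	return 0;
}
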
@@ -1762,6 +1762,7 @@ btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_poi
 		}
 		break;
 	case BTF_KIND_CONST:
+	case BTF_KIND_RESTRICT:
 	case BTF_KIND_VOLATILE:
 	case BTF_KIND_TYPEDEF:
 		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
@@ -1856,6 +1857,112 @@ static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_sp
 	return 0;
 }
 
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ *
+ * The `behind_ptr` argument is used to stop marking of composite types reached
+ * through a pointer. This way, we can keep BTF size in check while providing
+ * reasonable match semantics.
+ */
+static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
+{
+	const struct btf_type *btf_type;
+	struct btf *btf = info->src_btf;
+	struct btf_type *cloned_type;
+	int i, err;
+
+	if (type_id == 0)
+		return 0;
+
+	btf_type = btf__type_by_id(btf, type_id);
+	/* mark type on cloned BTF as used */
+	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
+	cloned_type->name_off = MARKED;
+
+	switch (btf_kind(btf_type)) {
+	case BTF_KIND_UNKN:
+	case BTF_KIND_INT:
+	case BTF_KIND_FLOAT:
+	case BTF_KIND_ENUM:
+	case BTF_KIND_ENUM64:
+		break;
+	case BTF_KIND_STRUCT:
+	case BTF_KIND_UNION: {
+		struct btf_member *m = btf_members(btf_type);
+		__u16 vlen = btf_vlen(btf_type);
+
+		if (behind_ptr)
+			break;
+
+		for (i = 0; i < vlen; i++, m++) {
+			/* mark member */
+			btfgen_mark_member(info, type_id, i);
+
+			/* mark member's type */
+			err = btfgen_mark_type_match(info, m->type, false);
+			if (err)
+				return err;
+		}
+		break;
+	}
+	case BTF_KIND_CONST:
+	case BTF_KIND_FWD:
+	case BTF_KIND_RESTRICT:
+	case BTF_KIND_TYPEDEF:
+	case BTF_KIND_VOLATILE:
+		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
+	case BTF_KIND_PTR:
+		return btfgen_mark_type_match(info, btf_type->type, true);
+	case BTF_KIND_ARRAY: {
+		struct btf_array *array;
+
+		array = btf_array(btf_type);
+		/* mark array type */
+		err = btfgen_mark_type_match(info, array->type, false);
+		/* mark array's index type */
+		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
+		if (err)
+			return err;
+		break;
+	}
+	case BTF_KIND_FUNC_PROTO: {
+		__u16 vlen = btf_vlen(btf_type);
+		struct btf_param *param;
+
+		/* mark ret type */
+		err = btfgen_mark_type_match(info, btf_type->type, false);
+		if (err)
+			return err;
+
+		/* mark parameters types */
+		param = btf_params(btf_type);
+		for (i = 0; i < vlen; i++) {
+			err = btfgen_mark_type_match(info, param->type, false);
+			if (err)
+				return err;
+			param++;
+		}
+		break;
+	}
+	/* tells if some other type needs to be handled */
+	default:
+		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ */
+static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
+}
+
 static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
 {
 	return btfgen_mark_type(info, targ_spec->root_type_id, true);
@@ -1882,6 +1989,8 @@ static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *r
 	case BPF_CORE_TYPE_EXISTS:
 	case BPF_CORE_TYPE_SIZE:
 		return btfgen_record_type_relo(info, res);
+	case BPF_CORE_TYPE_MATCHES:
+		return btfgen_record_type_match_relo(info, res);
 	case BPF_CORE_ENUMVAL_EXISTS:
 	case BPF_CORE_ENUMVAL_VALUE:
 		return btfgen_record_enumval_relo(info, res);

@@ -63,8 +63,6 @@ static inline void *u64_to_ptr(__u64 ptr)
 #define HELP_SPEC_LINK						\
 	"LINK := { id LINK_ID | pinned FILE }"
 
-extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE];
-
 /* keep in sync with the definition in skeleton/pid_iter.bpf.c */
 enum bpf_obj_type {
 	BPF_OBJ_UNKNOWN,

@@ -73,7 +73,7 @@ asm( \
 __BTF_ID_LIST(name, local)				\
 extern u32 name[];
 
-#define BTF_ID_LIST_GLOBAL(name)			\
+#define BTF_ID_LIST_GLOBAL(name, n)			\
 __BTF_ID_LIST(name, globl)
 
 /* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
@@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl)
 #define BTF_ID_LIST_SINGLE(name, prefix, typename)	\
 	BTF_ID_LIST(name) \
 	BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+	BTF_ID_LIST_GLOBAL(name, 1)			  \
+	BTF_ID(prefix, typename)
 
 /*
  * The BTF_ID_UNUSED macro defines 4 zero bytes.
@@ -143,13 +146,14 @@ extern struct btf_id_set name;
 
 #else
 
-#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
 #define BTF_ID(prefix, name)
 #define BTF_ID_UNUSED
-#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
-#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
-#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
-#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
 #define BTF_SET_END(name)
 
 #endif /* CONFIG_DEBUG_INFO_BTF */
@@ -172,7 +176,10 @@ extern struct btf_id_set name;
 	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock)	\
 	BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock)		\
 	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock)		\
-	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)		\
+	BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)		\
+	BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock)		\
+	BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
 
 enum {
 #define BTF_SOCK_TYPE(name, str) name,
@@ -184,4 +191,18 @@ MAX_BTF_SOCK_TYPE,
 extern u32 btf_sock_ids[];
 #endif
 
+#define BTF_TRACING_TYPE_xxx	\
+	BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct)	\
+	BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file)		\
+	BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+
 #endif

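For context on how these lists are consumed, a sketch modeled on the kernel's own usage (not part of this hunk): the enum generated from BTF_TRACING_TYPE_xxx indexes the btf_tracing_ids[] array that BTF resolution fills, and the helper below is an invented consumer.

/* define the global list and populate it from the xmacro */
BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE

/* hypothetical check that a BTF ID refers to struct task_struct */
static bool arg_is_task(u32 btf_id)
{
	return btf_id == btf_tracing_ids[BTF_TRACING_TYPE_TASK];
}
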
|
@ -998,6 +998,7 @@ enum bpf_attach_type {
|
|||
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
|
||||
BPF_PERF_EVENT,
|
||||
BPF_TRACE_KPROBE_MULTI,
|
||||
BPF_LSM_CGROUP,
|
||||
__MAX_BPF_ATTACH_TYPE
|
||||
};
|
||||
|
||||
|
@ -1431,6 +1432,7 @@ union bpf_attr {
|
|||
__u32 attach_flags;
|
||||
__aligned_u64 prog_ids;
|
||||
__u32 prog_cnt;
|
||||
__aligned_u64 prog_attach_flags; /* output: per-program attach_flags */
|
||||
} query;
|
||||
|
||||
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
|
||||
|
@ -6075,6 +6077,8 @@ struct bpf_prog_info {
|
|||
__u64 run_cnt;
|
||||
__u64 recursion_misses;
|
||||
__u32 verified_insns;
|
||||
__u32 attach_btf_obj_id;
|
||||
__u32 attach_btf_id;
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
struct bpf_map_info {
|
||||
|
@ -6782,6 +6786,7 @@ enum bpf_core_relo_kind {
|
|||
BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
|
||||
BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
|
||||
BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
|
||||
BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
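A hedged sketch of a BPF-side program using the new BPF_LSM_CGROUP attach type: the "lsm_cgroup/<hook>" section convention and the return semantics (1 allows, 0 rejects) follow this series, while the policy itself and the program name are made up.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("lsm_cgroup/socket_create")
int BPF_PROG(deny_raw_sockets, int family, int type, int protocol, int kern)
{
	/* reject raw sockets for tasks in the attached cgroup only */
	return type == SOCK_RAW ? 0 : 1;
}

char LICENSE[] SEC("license") = "GPL";
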
@@ -1,4 +1,4 @@
 libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
-	    netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
+	    netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
 	    btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
 	    usdt.o

@@ -237,7 +237,7 @@ install_lib: all_cmd
 		$(call do_install_mkdir,$(libdir_SQ)); \
 		cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
 
-SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
+SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h \
 	    bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \
 	    skel_internal.h libbpf_version.h usdt.bpf.h
 GEN_HDRS := $(BPF_GENERATED)

@@ -147,10 +147,6 @@ int bump_rlimit_memlock(void)
 {
 	struct rlimit rlim;
 
-	/* this the default in libbpf 1.0, but for now user has to opt-in explicitly */
-	if (!(libbpf_mode & LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK))
-		return 0;
-
 	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
 	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
 		return 0;
@@ -233,11 +229,10 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
 	return info;
 }
 
-DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
-int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
-			 const char *prog_name, const char *license,
-			 const struct bpf_insn *insns, size_t insn_cnt,
-			 const struct bpf_prog_load_opts *opts)
+int bpf_prog_load(enum bpf_prog_type prog_type,
+		  const char *prog_name, const char *license,
+		  const struct bpf_insn *insns, size_t insn_cnt,
+		  const struct bpf_prog_load_opts *opts)
 {
 	void *finfo = NULL, *linfo = NULL;
 	const char *func_info, *line_info;
@@ -384,94 +379,6 @@ done:
 	return libbpf_err_errno(fd);
 }
 
-__attribute__((alias("bpf_load_program_xattr2")))
-int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
-			   char *log_buf, size_t log_buf_sz);
-
-static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
-				   char *log_buf, size_t log_buf_sz)
-{
-	LIBBPF_OPTS(bpf_prog_load_opts, p);
-
-	if (!load_attr || !log_buf != !log_buf_sz)
-		return libbpf_err(-EINVAL);
-
-	p.expected_attach_type = load_attr->expected_attach_type;
-	switch (load_attr->prog_type) {
-	case BPF_PROG_TYPE_STRUCT_OPS:
-	case BPF_PROG_TYPE_LSM:
-		p.attach_btf_id = load_attr->attach_btf_id;
-		break;
-	case BPF_PROG_TYPE_TRACING:
-	case BPF_PROG_TYPE_EXT:
-		p.attach_btf_id = load_attr->attach_btf_id;
-		p.attach_prog_fd = load_attr->attach_prog_fd;
-		break;
-	default:
-		p.prog_ifindex = load_attr->prog_ifindex;
-		p.kern_version = load_attr->kern_version;
-	}
-	p.log_level = load_attr->log_level;
-	p.log_buf = log_buf;
-	p.log_size = log_buf_sz;
-	p.prog_btf_fd = load_attr->prog_btf_fd;
-	p.func_info_rec_size = load_attr->func_info_rec_size;
-	p.func_info_cnt = load_attr->func_info_cnt;
-	p.func_info = load_attr->func_info;
-	p.line_info_rec_size = load_attr->line_info_rec_size;
-	p.line_info_cnt = load_attr->line_info_cnt;
-	p.line_info = load_attr->line_info;
-	p.prog_flags = load_attr->prog_flags;
-
-	return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
-			     load_attr->insns, load_attr->insns_cnt, &p);
-}
-
-int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
-		     size_t insns_cnt, const char *license,
-		     __u32 kern_version, char *log_buf,
-		     size_t log_buf_sz)
-{
-	struct bpf_load_program_attr load_attr;
-
-	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
-	load_attr.prog_type = type;
-	load_attr.expected_attach_type = 0;
-	load_attr.name = NULL;
-	load_attr.insns = insns;
-	load_attr.insns_cnt = insns_cnt;
-	load_attr.license = license;
-	load_attr.kern_version = kern_version;
-
-	return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
-}
-
-int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
-		       size_t insns_cnt, __u32 prog_flags, const char *license,
-		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
-		       int log_level)
-{
-	union bpf_attr attr;
-	int fd;
-
-	bump_rlimit_memlock();
-
-	memset(&attr, 0, sizeof(attr));
-	attr.prog_type = type;
-	attr.insn_cnt = (__u32)insns_cnt;
-	attr.insns = ptr_to_u64(insns);
-	attr.license = ptr_to_u64(license);
-	attr.log_buf = ptr_to_u64(log_buf);
-	attr.log_size = log_buf_sz;
-	attr.log_level = log_level;
-	log_buf[0] = 0;
-	attr.kern_version = kern_version;
-	attr.prog_flags = prog_flags;
-
-	fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
-	return libbpf_err_errno(fd);
-}
-
 int bpf_map_update_elem(int fd, const void *key, const void *value,
 			__u64 flags)
 {
@@ -888,80 +795,48 @@ int bpf_iter_create(int link_fd)
 	return libbpf_err_errno(fd);
 }
 
-int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
-		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
+int bpf_prog_query_opts(int target_fd,
+			enum bpf_attach_type type,
+			struct bpf_prog_query_opts *opts)
 {
 	union bpf_attr attr;
 	int ret;
 
-	memset(&attr, 0, sizeof(attr));
-	attr.query.target_fd = target_fd;
-	attr.query.attach_type = type;
-	attr.query.query_flags = query_flags;
-	attr.query.prog_cnt = *prog_cnt;
-	attr.query.prog_ids = ptr_to_u64(prog_ids);
-
-	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
-
-	if (attach_flags)
-		*attach_flags = attr.query.attach_flags;
-	*prog_cnt = attr.query.prog_cnt;
-
-	return libbpf_err_errno(ret);
-}
-
-int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
-		      void *data_out, __u32 *size_out, __u32 *retval,
-		      __u32 *duration)
-{
-	union bpf_attr attr;
-	int ret;
-
-	memset(&attr, 0, sizeof(attr));
-	attr.test.prog_fd = prog_fd;
-	attr.test.data_in = ptr_to_u64(data);
-	attr.test.data_out = ptr_to_u64(data_out);
-	attr.test.data_size_in = size;
-	attr.test.repeat = repeat;
-
-	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
-
-	if (size_out)
-		*size_out = attr.test.data_size_out;
-	if (retval)
-		*retval = attr.test.retval;
-	if (duration)
-		*duration = attr.test.duration;
-
-	return libbpf_err_errno(ret);
-}
-
-int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
-{
-	union bpf_attr attr;
-	int ret;
-
-	if (!test_attr->data_out && test_attr->data_size_out > 0)
+	if (!OPTS_VALID(opts, bpf_prog_query_opts))
 		return libbpf_err(-EINVAL);
 
 	memset(&attr, 0, sizeof(attr));
-	attr.test.prog_fd = test_attr->prog_fd;
-	attr.test.data_in = ptr_to_u64(test_attr->data_in);
-	attr.test.data_out = ptr_to_u64(test_attr->data_out);
-	attr.test.data_size_in = test_attr->data_size_in;
-	attr.test.data_size_out = test_attr->data_size_out;
-	attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
-	attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
-	attr.test.ctx_size_in = test_attr->ctx_size_in;
-	attr.test.ctx_size_out = test_attr->ctx_size_out;
-	attr.test.repeat = test_attr->repeat;
-
-	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+	attr.query.target_fd = target_fd;
+	attr.query.attach_type = type;
+	attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
+	attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
+	attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
+	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
 
-	test_attr->data_size_out = attr.test.data_size_out;
-	test_attr->ctx_size_out = attr.test.ctx_size_out;
-	test_attr->retval = attr.test.retval;
-	test_attr->duration = attr.test.duration;
+	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
+
+	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
+	OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);
 
 	return libbpf_err_errno(ret);
 }
 
+int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
+		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
+{
+	LIBBPF_OPTS(bpf_prog_query_opts, opts);
+	int ret;
+
+	opts.query_flags = query_flags;
+	opts.prog_ids = prog_ids;
+	opts.prog_cnt = *prog_cnt;
+
+	ret = bpf_prog_query_opts(target_fd, type, &opts);
+
+	if (attach_flags)
+		*attach_flags = opts.attach_flags;
+	*prog_cnt = opts.prog_cnt;
+
+	return libbpf_err_errno(ret);
+}
@@ -1162,27 +1037,6 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
 	return libbpf_err_errno(fd);
 }
 
-int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log)
-{
-	LIBBPF_OPTS(bpf_btf_load_opts, opts);
-	int fd;
-
-retry:
-	if (do_log && log_buf && log_buf_size) {
-		opts.log_buf = log_buf;
-		opts.log_size = log_buf_size;
-		opts.log_level = 1;
-	}
-
-	fd = bpf_btf_load(btf, btf_size, &opts);
-	if (fd < 0 && !do_log && log_buf && log_buf_size) {
-		do_log = true;
-		goto retry;
-	}
-
-	return libbpf_err_errno(fd);
-}
-
 int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
 		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
 		      __u64 *probe_addr)

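Migration sketch for callers of the removed loaders (assumed caller code, not from the patch): bpf_load_program() maps directly onto bpf_prog_load(), with a NULL opts pointer standing in for the old zeroed attributes.

#include <linux/bpf.h>
#include <bpf/bpf.h>

int load_trivial_filter(void)
{
	/* r0 = 0; exit; */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};

	/* before: bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns, 2,
	 *                          "GPL", 0, NULL, 0);
	 */
	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "trivial", "GPL",
			     insns, 2, NULL);
}
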
@@ -103,54 +103,6 @@ LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
 			     const char *prog_name, const char *license,
 			     const struct bpf_insn *insns, size_t insn_cnt,
 			     const struct bpf_prog_load_opts *opts);
-/* this "specialization" should go away in libbpf 1.0 */
-LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
-				    const char *prog_name, const char *license,
-				    const struct bpf_insn *insns, size_t insn_cnt,
-				    const struct bpf_prog_load_opts *opts);
-
-/* This is an elaborate way to not conflict with deprecated bpf_prog_load()
- * API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone.
- * With this approach, if someone is calling bpf_prog_load() with
- * 4 arguments, they will use the deprecated API, which keeps backwards
- * compatibility (both source code and binary). If bpf_prog_load() is called
- * with 6 arguments, though, it gets redirected to __bpf_prog_load.
- * So looking forward to libbpf 1.0 when this hack will be gone and
- * __bpf_prog_load() will be called just bpf_prog_load().
- */
-#ifndef bpf_prog_load
-#define bpf_prog_load(...) ___libbpf_overload(___bpf_prog_load, __VA_ARGS__)
-#define ___bpf_prog_load4(file, type, pobj, prog_fd) \
-	bpf_prog_load_deprecated(file, type, pobj, prog_fd)
-#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \
-	bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts)
-#endif /* bpf_prog_load */
-
-struct bpf_load_program_attr {
-	enum bpf_prog_type prog_type;
-	enum bpf_attach_type expected_attach_type;
-	const char *name;
-	const struct bpf_insn *insns;
-	size_t insns_cnt;
-	const char *license;
-	union {
-		__u32 kern_version;
-		__u32 attach_prog_fd;
-	};
-	union {
-		__u32 prog_ifindex;
-		__u32 attach_btf_id;
-	};
-	__u32 prog_btf_fd;
-	__u32 func_info_rec_size;
-	const void *func_info;
-	__u32 func_info_cnt;
-	__u32 line_info_rec_size;
-	const void *line_info;
-	__u32 line_info_cnt;
-	__u32 log_level;
-	__u32 prog_flags;
-};
 
 /* Flags to direct loading requirements */
 #define MAPS_RELAX_COMPAT	0x01
@@ -158,22 +110,6 @@ struct bpf_load_program_attr {
 /* Recommended log buffer size */
 #define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
 
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
-LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
-				      char *log_buf, size_t log_buf_sz);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
-LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
-				const struct bpf_insn *insns, size_t insns_cnt,
-				const char *license, __u32 kern_version,
-				char *log_buf, size_t log_buf_sz);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
-LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
-				  const struct bpf_insn *insns,
-				  size_t insns_cnt, __u32 prog_flags,
-				  const char *license, __u32 kern_version,
-				  char *log_buf, size_t log_buf_sz,
-				  int log_level);
 
 struct bpf_btf_load_opts {
 	size_t sz; /* size of this struct for forward/backward compatibility */
@@ -187,10 +123,6 @@ struct bpf_btf_load_opts {
 LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
 			    const struct bpf_btf_load_opts *opts);
 
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead")
-LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
-			    __u32 log_buf_size, bool do_log);
-
 LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
 				   __u64 flags);
 
@@ -353,10 +285,6 @@ LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
 LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd,
 				     enum bpf_attach_type type,
 				     const struct bpf_prog_attach_opts *opts);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_prog_attach_opts() instead")
-LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd,
-				     enum bpf_attach_type type,
-				     const struct bpf_prog_attach_opts *opts);
 LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
 LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
 				enum bpf_attach_type type);
@@ -422,17 +350,6 @@ struct bpf_prog_test_run_attr {
 			      * out: length of cxt_out */
 };
 
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
-LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
-
-/*
- * bpf_prog_test_run does not check that data_out is large enough. Consider
- * using bpf_prog_test_run_opts instead.
- */
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
-LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
-				 __u32 size, void *data_out, __u32 *size_out,
-				 __u32 *retval, __u32 *duration);
 LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
 LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
 LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
@@ -442,9 +359,24 @@ LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
 LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
 LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
 LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
+
+struct bpf_prog_query_opts {
+	size_t sz; /* size of this struct for forward/backward compatibility */
+	__u32 query_flags;
+	__u32 attach_flags; /* output argument */
+	__u32 *prog_ids;
+	__u32 prog_cnt; /* input+output argument */
+	__u32 *prog_attach_flags;
+};
+#define bpf_prog_query_opts__last_field prog_attach_flags
+
+LIBBPF_API int bpf_prog_query_opts(int target_fd,
+				   enum bpf_attach_type type,
+				   struct bpf_prog_query_opts *opts);
 LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
 			      __u32 query_flags, __u32 *attach_flags,
 			      __u32 *prog_ids, __u32 *prog_cnt);
+
 LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
 LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
 				 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,

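Likewise for the removed test-run wrappers: inputs and outputs now all travel through bpf_test_run_opts. A minimal assumed migration for a caller that used bpf_prog_test_run():

#include <bpf/bpf.h>

int run_once(int prog_fd, void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = pkt,
		    .data_size_in = pkt_len,
		    .repeat = 1);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (err)
		return err;
	/* retval and duration come back through the opts struct */
	return topts.retval;
}
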
@@ -29,6 +29,7 @@ enum bpf_type_id_kind {
 enum bpf_type_info_kind {
 	BPF_TYPE_EXISTS = 0,		/* type existence in target kernel */
 	BPF_TYPE_SIZE = 1,		/* type size in target kernel */
+	BPF_TYPE_MATCHES = 2,		/* type match in target kernel */
 };
 
 /* second argument to __builtin_preserve_enum_value() built-in */
@@ -183,6 +184,16 @@ enum bpf_enum_value_kind {
 #define bpf_core_type_exists(type)					    \
 	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
 
+/*
+ * Convenience macro to check that provided named type
+ * (struct/union/enum/typedef) "matches" that in a target kernel.
+ * Returns:
+ *    1, if the type matches in the target kernel's BTF;
+ *    0, if the type does not match any in the target kernel
+ */
+#define bpf_core_type_matches(type)					    \
+	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
+
 /*
  * Convenience macro to get the byte size of a provided named type
  * (struct/union/enum/typedef) in a target kernel.

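A sketch of the intended bpf_core_type_matches() usage with a CO-RE type flavor, loosely modeled on the selftests in this series; the ___old flavor layout is illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

/* local "flavor" describing an older task_struct layout */
struct task_struct___old {
	long state;
} __attribute__((preserve_access_index));

SEC("tp_btf/sched_switch")
int BPF_PROG(handle_switch, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	long state;

	if (bpf_core_type_matches(struct task_struct___old))
		/* kernel still carries task_struct::state */
		state = ((struct task_struct___old *)prev)->state;
	else
		state = prev->__state;
	bpf_printk("prev task state: %ld", state);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
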
@@ -22,12 +22,25 @@
  * To allow use of SEC() with externs (e.g., for extern .maps declarations),
  * make sure __attribute__((unused)) doesn't trigger compilation warning.
  */
+#if __GNUC__ && !__clang__
+
+/*
+ * Pragma macros are broken on GCC
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
+ */
+#define SEC(name) __attribute__((section(name), used))
+
+#else
+
 #define SEC(name) \
 	_Pragma("GCC diagnostic push")					    \
 	_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"")	    \
 	__attribute__((section(name), used))				    \
 	_Pragma("GCC diagnostic pop")					    \
 
+#endif
+
 /* Avoid 'linux/stddef.h' definition of '__always_inline'. */
 #undef __always_inline
 #define __always_inline inline __attribute__((always_inline))

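The diagnostic dance above exists because SEC() may be applied to extern declarations. A short assumed example of that pattern (the map and program names are invented, and linking a second object providing the map is implied):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* extern map shared across linked BPF objects; without the
 * -Wignored-attributes suppression this declaration warns on clang */
extern struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, long);
} shared_counters SEC(".maps");

SEC("xdp")
int count_packets(struct xdp_md *ctx)
{
	int key = 0;
	long *val = bpf_map_lookup_elem(&shared_counters, &key);

	if (val)
		__sync_fetch_and_add(val, 1);
	return XDP_PASS;
}
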
@@ -233,7 +233,7 @@ struct pt_regs___arm64 {
 #define __PT_PARM5_REG a4
 #define __PT_RET_REG ra
 #define __PT_FP_REG s0
-#define __PT_RC_REG a5
+#define __PT_RC_REG a0
 #define __PT_SP_REG sp
 #define __PT_IP_REG pc
 /* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */

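__PT_RC_REG is what PT_REGS_RC() and thus BPF_KRETPROBE()'s return-value argument read, which is where the wrong register showed up on riscv (a0 holds the return value in that ABI). Standard usage for reference; the probe target is an assumed example:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kretprobe/do_unlinkat")
int BPF_KRETPROBE(handle_exit, long ret)
{
	/* "ret" is fetched via PT_REGS_RC(), i.e. __PT_RC_REG */
	bpf_printk("do_unlinkat returned %ld", ret);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
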
@@ -448,11 +448,6 @@ static int btf_parse_type_sec(struct btf *btf)
 	return 0;
 }
 
-__u32 btf__get_nr_types(const struct btf *btf)
-{
-	return btf->start_id + btf->nr_types - 1;
-}
-
 __u32 btf__type_cnt(const struct btf *btf)
 {
 	return btf->start_id + btf->nr_types;
@@ -1408,92 +1403,6 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
 	return btf__load_from_kernel_by_id_split(id, NULL);
 }
 
-int btf__get_from_id(__u32 id, struct btf **btf)
-{
-	struct btf *res;
-	int err;
-
-	*btf = NULL;
-	res = btf__load_from_kernel_by_id(id);
-	err = libbpf_get_error(res);
-
-	if (err)
-		return libbpf_err(err);
-
-	*btf = res;
-	return 0;
-}
-
-int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
-			 __u32 expected_key_size, __u32 expected_value_size,
-			 __u32 *key_type_id, __u32 *value_type_id)
-{
-	const struct btf_type *container_type;
-	const struct btf_member *key, *value;
-	const size_t max_name = 256;
-	char container_name[max_name];
-	__s64 key_size, value_size;
-	__s32 container_id;
-
-	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) {
-		pr_warn("map:%s length of '____btf_map_%s' is too long\n",
-			map_name, map_name);
-		return libbpf_err(-EINVAL);
-	}
-
-	container_id = btf__find_by_name(btf, container_name);
-	if (container_id < 0) {
-		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
-			 map_name, container_name);
-		return libbpf_err(container_id);
-	}
-
-	container_type = btf__type_by_id(btf, container_id);
-	if (!container_type) {
-		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
-			map_name, container_id);
-		return libbpf_err(-EINVAL);
-	}
-
-	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
-		pr_warn("map:%s container_name:%s is an invalid container struct\n",
-			map_name, container_name);
-		return libbpf_err(-EINVAL);
-	}
-
-	key = btf_members(container_type);
-	value = key + 1;
-
-	key_size = btf__resolve_size(btf, key->type);
-	if (key_size < 0) {
-		pr_warn("map:%s invalid BTF key_type_size\n", map_name);
-		return libbpf_err(key_size);
-	}
-
-	if (expected_key_size != key_size) {
-		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
-			map_name, (__u32)key_size, expected_key_size);
-		return libbpf_err(-EINVAL);
-	}
-
-	value_size = btf__resolve_size(btf, value->type);
-	if (value_size < 0) {
-		pr_warn("map:%s invalid BTF value_type_size\n", map_name);
-		return libbpf_err(value_size);
-	}
-
-	if (expected_value_size != value_size) {
-		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
-			map_name, (__u32)value_size, expected_value_size);
-		return libbpf_err(-EINVAL);
-	}
-
-	*key_type_id = key->type;
-	*value_type_id = value->type;
-
-	return 0;
-}
-
 static void btf_invalidate_raw_data(struct btf *btf)
 {
 	if (btf->raw_data) {
@@ -2965,81 +2874,6 @@ const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
 	return btf_ext->data;
 }
 
-static int btf_ext_reloc_info(const struct btf *btf,
-			      const struct btf_ext_info *ext_info,
-			      const char *sec_name, __u32 insns_cnt,
-			      void **info, __u32 *cnt)
-{
-	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
-	__u32 i, record_size, existing_len, records_len;
-	struct btf_ext_info_sec *sinfo;
-	const char *info_sec_name;
-	__u64 remain_len;
-	void *data;
-
-	record_size = ext_info->rec_size;
-	sinfo = ext_info->info;
-	remain_len = ext_info->len;
-	while (remain_len > 0) {
-		records_len = sinfo->num_info * record_size;
-		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
-		if (strcmp(info_sec_name, sec_name)) {
-			remain_len -= sec_hdrlen + records_len;
-			sinfo = (void *)sinfo + sec_hdrlen + records_len;
-			continue;
-		}
-
-		existing_len = (*cnt) * record_size;
-		data = realloc(*info, existing_len + records_len);
-		if (!data)
-			return libbpf_err(-ENOMEM);
-
-		memcpy(data + existing_len, sinfo->data, records_len);
-		/* adjust insn_off only, the rest data will be passed
-		 * to the kernel.
-		 */
-		for (i = 0; i < sinfo->num_info; i++) {
-			__u32 *insn_off;
-
-			insn_off = data + existing_len + (i * record_size);
-			*insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt;
-		}
-		*info = data;
-		*cnt += sinfo->num_info;
-		return 0;
-	}
-
-	return libbpf_err(-ENOENT);
-}
-
-int btf_ext__reloc_func_info(const struct btf *btf,
-			     const struct btf_ext *btf_ext,
-			     const char *sec_name, __u32 insns_cnt,
-			     void **func_info, __u32 *cnt)
-{
-	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
-				  insns_cnt, func_info, cnt);
-}
-
-int btf_ext__reloc_line_info(const struct btf *btf,
-			     const struct btf_ext *btf_ext,
-			     const char *sec_name, __u32 insns_cnt,
-			     void **line_info, __u32 *cnt)
-{
-	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
-				  insns_cnt, line_info, cnt);
-}
-
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
-{
-	return btf_ext->func_info.rec_size;
-}
-
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
-{
-	return btf_ext->line_info.rec_size;
-}
-
 struct btf_dedup;
 
 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
@@ -3189,9 +3023,7 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
 * deduplicating structs/unions is described in greater details in comments for
 * `btf_dedup_is_equiv` function.
 */
-
-DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
-int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
+int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
 {
 	struct btf_dedup *d;
 	int err;
@@ -3251,19 +3083,6 @@ done:
 	return libbpf_err(err);
 }
 
-COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
-int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
-{
-	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
-
-	if (unused_opts) {
-		pr_warn("please use new version of btf__dedup() that supports options\n");
-		return libbpf_err(-ENOTSUP);
-	}
-
-	return btf__dedup(btf, &opts);
-}
-
 #define BTF_UNPROCESSED_ID ((__u32)-1)
 #define BTF_IN_PROGRESS_ID ((__u32)-2)
 

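Migration sketch for the removed btf__get_nr_types() (assumed caller code): btf__type_cnt() returns one past the last valid type ID, so iteration bounds change accordingly.

#include <bpf/btf.h>

static int count_structs(const struct btf *btf)
{
	__u32 id, n = btf__type_cnt(btf);
	int structs = 0;

	/* ID 0 is the implicit void type; for split BTF, start from
	 * btf__type_cnt() of the base BTF instead */
	for (id = 1; id < n; id++) {
		const struct btf_type *t = btf__type_by_id(btf, id);

		if (btf_is_struct(t))
			structs++;
	}
	return structs;
}
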
@@ -120,20 +120,12 @@ LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
 
 LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id);
 LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf);
-LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead")
-LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
 
-LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only")
-LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
-LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
-LIBBPF_API int btf__load(struct btf *btf);
 LIBBPF_API int btf__load_into_kernel(struct btf *btf);
 LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
 				   const char *type_name);
 LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
 					const char *type_name, __u32 kind);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1")
-LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
 LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
 LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
 LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
@@ -150,29 +142,10 @@ LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
 LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
 LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
 LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
-LIBBPF_DEPRECATED_SINCE(0, 7, "this API is not necessary when BTF-defined maps are used")
-LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
-				    __u32 expected_key_size,
-				    __u32 expected_value_size,
-				    __u32 *key_type_id, __u32 *value_type_id);
 
 LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
 LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
 LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
-int btf_ext__reloc_func_info(const struct btf *btf,
-			     const struct btf_ext *btf_ext,
-			     const char *sec_name, __u32 insns_cnt,
-			     void **func_info, __u32 *cnt);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
-int btf_ext__reloc_line_info(const struct btf *btf,
-			     const struct btf_ext *btf_ext,
-			     const char *sec_name, __u32 insns_cnt,
-			     void **line_info, __u32 *cnt);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info is deprecated; write custom func_info parsing to fetch rec_size")
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info is deprecated; write custom line_info parsing to fetch rec_size")
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
 
 LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
 LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
@@ -259,22 +232,12 @@ struct btf_dedup_opts {
 
 LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
 
-LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts);
-
-LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead")
-LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts);
-#define btf__dedup(...) ___libbpf_overload(___btf_dedup, __VA_ARGS__)
-#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts)
-#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts)
-
 struct btf_dump;
 
 struct btf_dump_opts {
-	union {
-		size_t sz;
-		void *ctx; /* DEPRECATED: will be gone in v1.0 */
-	};
+	size_t sz;
 };
+#define btf_dump_opts__last_field sz
 
 typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
 
@@ -283,51 +246,6 @@ LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
 					  void *ctx,
 					  const struct btf_dump_opts *opts);
 
-LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
-						 btf_dump_printf_fn_t printf_fn,
-						 void *ctx,
-						 const struct btf_dump_opts *opts);
-
-LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
-						     const struct btf_ext *btf_ext,
-						     const struct btf_dump_opts *opts,
-						     btf_dump_printf_fn_t printf_fn);
-
-/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the
- * type of 4th argument. If it's btf_dump's print callback, use deprecated
- * API; otherwise, choose the new btf_dump__new(). ___libbpf_override()
- * doesn't work here because both variants have 4 input arguments.
- *
- * (void *) casts are necessary to avoid compilation warnings about type
- * mismatches, because even though __builtin_choose_expr() only ever evaluates
- * one side the other side still has to satisfy type constraints (this is
- * compiler implementation limitation which might be lifted eventually,
- * according to the documentation). So passing struct btf_ext in place of
- * btf_dump_printf_fn_t would be generating compilation warning. Casting to
- * void * avoids this issue.
- *
- * Also, two type compatibility checks for a function and function pointer are
- * required because passing function reference into btf_dump__new() as
- * btf_dump__new(..., my_callback, ...) and as btf_dump__new(...,
- * &my_callback, ...) (not explicit ampersand in the latter case) actually
- * differs as far as __builtin_types_compatible_p() is concerned. Thus two
- * checks are combined to detect callback argument.
- *
- * The rest works just like in case of ___libbpf_override() usage with symbol
- * versioning.
- *
- * C++ compilers don't support __builtin_types_compatible_p(), so at least
- * don't screw up compilation for them and let C++ users pick btf_dump__new
- * vs btf_dump__new_deprecated explicitly.
- */
-#ifndef __cplusplus
-#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr(				\
-	__builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) ||		\
-	__builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)),	\
-	btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4),	\
-	btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4))
-#endif
-
 LIBBPF_API void btf_dump__free(struct btf_dump *d);
 
 LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);

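With the overload machinery gone, btf_dump__new() is back to a single fixed signature: btf, printf callback, callback context, opts. An assumed minimal caller:

#include <stdarg.h>
#include <stdio.h>
#include <bpf/btf.h>

static void print_cb(void *ctx, const char *fmt, va_list args)
{
	vfprintf(stdout, fmt, args);
}

int dump_type(const struct btf *btf, __u32 id)
{
	/* NULL opts is accepted; the ctx argument is passed to print_cb */
	struct btf_dump *d = btf_dump__new(btf, print_cb, NULL, NULL);
	int err;

	if (!d)
		return -1;
	err = btf_dump__dump_type(d, id);
	btf_dump__free(d);
	return err;
}
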
@@ -144,15 +144,17 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
 static int btf_dump_mark_referenced(struct btf_dump *d);
 static int btf_dump_resize(struct btf_dump *d);
 
-DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0)
-struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
-				      btf_dump_printf_fn_t printf_fn,
-				      void *ctx,
-				      const struct btf_dump_opts *opts)
+struct btf_dump *btf_dump__new(const struct btf *btf,
+			       btf_dump_printf_fn_t printf_fn,
+			       void *ctx,
+			       const struct btf_dump_opts *opts)
 {
 	struct btf_dump *d;
 	int err;
 
+	if (!OPTS_VALID(opts, btf_dump_opts))
+		return libbpf_err_ptr(-EINVAL);
+
 	if (!printf_fn)
 		return libbpf_err_ptr(-EINVAL);
 
@@ -188,17 +190,6 @@ err:
 	return libbpf_err_ptr(err);
 }
 
-COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4)
-struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
-					  const struct btf_ext *btf_ext,
-					  const struct btf_dump_opts *opts,
-					  btf_dump_printf_fn_t printf_fn)
-{
-	if (!printf_fn)
-		return libbpf_err_ptr(-EINVAL);
-	return btf_dump__new_v0_6_0(btf, printf_fn, opts ? opts->ctx : NULL, opts);
-}
-
 static int btf_dump_resize(struct btf_dump *d)
 {
 	int err, last_id = btf__type_cnt(d->btf) - 1;

(File diff suppressed because it is too large.)

@@ -101,11 +101,6 @@ LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
struct bpf_object;

struct bpf_object_open_attr {
	const char *file;
	enum bpf_prog_type prog_type;
};

struct bpf_object_open_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
@@ -118,21 +113,12 @@ struct bpf_object_open_opts {
	const char *object_name;
	/* parse map definitions non-strictly, allowing extra attributes/data */
	bool relaxed_maps;
	/* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures.
	 * Value is ignored. Relocations are always processed non-strictly.
	 * Non-relocatable instructions are replaced with invalid ones to
	 * prevent accidental errors.
	 */
	LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect")
	bool relaxed_core_relocs;
	/* maps that set the 'pinning' attribute in their definition will have
	 * their pin_path attribute set to a file in this directory, and be
	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
	 */
	const char *pin_root_path;

	LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program")
	__u32 attach_prog_fd;
	long :0;
	/* Additional kernel config content that augments and overrides
	 * system Kconfig for CONFIG_xxx externs.
	 */
@@ -215,20 +201,10 @@ LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     const struct bpf_object_open_opts *opts);

/* deprecated bpf_object__open variants */
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open_mem() instead")
LIBBPF_API struct bpf_object *
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
			const char *name);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open_file() instead")
LIBBPF_API struct bpf_object *
bpf_object__open_xattr(struct bpf_object_open_attr *attr);
/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);

enum libbpf_pin_type {
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};
LIBBPF_API void bpf_object__close(struct bpf_object *object);

/* pin_maps and unpin_maps can both be called with a NULL path, in which case
 * they will use the pin_path attribute of each map (and ignore all maps that
@@ -242,20 +218,6 @@ LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
					  const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API void bpf_object__close(struct bpf_object *object);

struct bpf_object_load_attr {
	struct bpf_object *obj;
	int log_level;
	const char *target_btf_path;
};

/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead")
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead")
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);

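With the *_xattr and *_buffer variants gone, the surviving open/load flow is compact. A hedged sketch -- object path and lifetime handling are illustrative only:

#include <errno.h>
#include <bpf/libbpf.h>

int load_object(const char *path)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file(path, NULL /* default opts */);
	if (!obj)
		return -errno;	/* libbpf 1.0: NULL plus errno on failure */

	err = bpf_object__load(obj);	/* negative errno on failure */
	/* a real caller would keep obj alive and attach programs here */
	bpf_object__close(obj);
	return err;
}
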
@@ -265,29 +227,10 @@ struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__find_program_by_name() instead")
LIBBPF_API struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title);
LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead")
struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
	for ((pos) = bpf_object__next(NULL), \
	     (tmp) = bpf_object__next(pos); \
	     (pos) != NULL; \
	     (pos) = (tmp), (tmp) = bpf_object__next(tmp))

typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
				    bpf_object_clear_priv_t clear_priv);
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);

LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			 enum bpf_attach_type *expected_attach_type);
@@ -298,9 +241,7 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,

/* Accessors of bpf_program */
struct bpf_program;
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead")
struct bpf_program *bpf_program__next(struct bpf_program *prog,
				      const struct bpf_object *obj);

LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);

@@ -309,33 +250,17 @@ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog)
	     (pos) != NULL; \
	     (pos) = bpf_object__next_program((obj), (pos)))

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead")
struct bpf_program *bpf_program__prev(struct bpf_program *prog,
				      const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);

typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);

LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
				     bpf_program_clear_priv_t clear_priv);
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
					 __u32 ifindex);

LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead")
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);

/* returns program size in bytes */
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead")
LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);

struct bpf_insn;

/**
@@ -388,17 +313,7 @@ LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
 */
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);

LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead")
LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
					 const char *path,
					 int instance);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
					   const char *path,
					   int instance);

/**
 * @brief **bpf_program__pin()** pins the BPF program to a file
@@ -698,99 +613,6 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

/*
 * Libbpf allows callers to adjust BPF programs before they are loaded
 * into the kernel. One program in an object file can be transformed into
 * multiple variants to be attached to different hooks.
 *
 * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
 * form an API for this purpose.
 *
 * - bpf_program_prep_t:
 *   Defines a 'preprocessor', which is a caller-defined function
 *   passed to libbpf through bpf_program__set_prep(), and will be
 *   called before the program is loaded. The preprocessor should adjust
 *   the program one time for each instance according to the instance id
 *   passed to it.
 *
 * - bpf_program__set_prep:
 *   Attaches a preprocessor to a BPF program. The number of instances
 *   that should be created is also passed through this function.
 *
 * - bpf_program__nth_fd:
 *   After the program is loaded, get the resulting FD of a given instance
 *   of the BPF program.
 *
 * If bpf_program__set_prep() is not used, the program is loaded
 * without adjustment during bpf_object__load(). The program has only
 * one instance. In this case bpf_program__fd(prog) is equal to
 * bpf_program__nth_fd(prog, 0).
 */
struct bpf_prog_prep_result {
	/*
	 * If not NULL, load new instruction array.
	 * If set to NULL, don't load this instance.
	 */
	struct bpf_insn *new_insn_ptr;
	int new_insn_cnt;

	/* If not NULL, result FD is written to it. */
	int *pfd;
};

/*
 * Parameters of bpf_program_prep_t:
 * - prog:	The bpf_program being loaded.
 * - n:		Index of instance being generated.
 * - insns:	BPF instructions array.
 * - insns_cnt:	Number of instructions in insns.
 * - res:	Output parameter, result of transformation.
 *
 * Return value:
 * - Zero:	pre-processing success.
 * - Non-zero:	pre-processing error, stop loading.
 */
typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
				  struct bpf_insn *insns, int insns_cnt,
				  struct bpf_prog_prep_result *res);

LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
				     bpf_program_prep_t prep);

LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);

/*
 * Adjust type of BPF program. Default is kprobe.
 */
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);

LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);

/**
@@ -853,47 +675,6 @@ LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
			       const char *attach_func_name);

LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog);

/*
 * No need for __attribute__((packed)); all members of 'bpf_map_def'
 * are aligned. In addition, using __attribute__((packed))
 * would trigger a -Wpacked warning message, and lead to an error
 * if -Werror is set.
 */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

/**
 * @brief **bpf_object__find_map_by_name()** returns BPF map of
 * the given name, if it exists within the passed BPF object
@@ -908,16 +689,6 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);

/*
 * Get bpf_map through the offset of corresponding struct bpf_map_def
 * in the BPF object file.
 */
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__find_map_by_name() instead")
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead")
struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);

@@ -927,8 +698,6 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
	     (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead")
struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);

@@ -962,9 +731,6 @@ LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
 */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map definition */
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead")
const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */
@@ -973,8 +739,6 @@ LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
/* get/set map size (max_entries) */
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__set_max_entries() instead")
LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
/* get/set map flags */
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
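As a migration note: code that previously called the removed bpf_map__resize() now sets max_entries explicitly before load. A small sketch (map name is hypothetical):

#include <errno.h>
#include <bpf/libbpf.h>

int size_events_map(struct bpf_object *obj, __u32 n)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");

	if (!map)
		return -errno;
	return bpf_map__set_max_entries(map, n);	/* must precede load */
}
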
@@ -997,17 +761,9 @@ LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);

typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
				 bpf_map_clear_priv_t clear_priv);
LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
					  const void *data, size_t size);
LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__type() instead")
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);

/**
 * @brief **bpf_map__is_internal()** tells the caller whether or not the
@@ -1130,65 +886,6 @@ LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
				     const void *cur_key, void *next_key, size_t key_sz);

/**
 * @brief **libbpf_get_error()** extracts the error code from the passed
 * pointer
 * @param ptr pointer returned from libbpf API function
 * @return error code; or 0 if no error occurred
 *
 * Many libbpf API functions which return pointers have logic to encode error
 * codes as pointers, and do not return NULL. Meaning **libbpf_get_error()**
 * should be used on the return value from these functions immediately after
 * calling the API function, with no intervening calls that could clobber the
 * `errno` variable. Consult the individual function's documentation to verify
 * whether this logic applies.
 *
 * For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)`
 * is enabled, NULL is returned on error instead.
 *
 * If ptr is NULL, then errno should be already set by the failing
 * API, because libbpf never returns NULL on success and it now always
 * sets errno on error.
 *
 * Example usage:
 *
 *   struct perf_buffer *pb;
 *
 *   pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts);
 *   err = libbpf_get_error(pb);
 *   if (err) {
 *	  pb = NULL;
 *	  fprintf(stderr, "failed to open perf buffer: %d\n", err);
 *	  goto cleanup;
 *   }
 */
LIBBPF_API long libbpf_get_error(const void *ptr);

struct bpf_prog_load_attr {
	const char *file;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int ifindex;
	int log_level;
	int prog_flags;
};

LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open() and bpf_object__load() instead")
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
				   struct bpf_object **pobj, int *prog_fd);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead")
LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
					struct bpf_object **pobj, int *prog_fd);

/* XDP related API */
struct xdp_link_info {
	__u32 prog_id;
	__u32 drv_prog_id;
	__u32 hw_prog_id;
	__u32 skb_prog_id;
	__u8 attach_mode;
};

struct bpf_xdp_set_link_opts {
	size_t sz;
	int old_fd;
@@ -1196,17 +893,6 @@ struct bpf_xdp_set_link_opts {
};
#define bpf_xdp_set_link_opts__last_field old_fd

LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead")
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead")
LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
					const struct bpf_xdp_set_link_opts *opts);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query_id() instead")
LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query() instead")
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
				     size_t info_size, __u32 flags);

struct bpf_xdp_attach_opts {
	size_t sz;
	int old_prog_fd;
@@ -1305,17 +991,7 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);

/* common use perf buffer options */
struct perf_buffer_opts {
	union {
		size_t sz;
		struct { /* DEPRECATED: will be removed in v1.0 */
			/* if specified, sample_cb is called for each sample */
			perf_buffer_sample_fn sample_cb;
			/* if specified, lost_cb is called for each batch of lost samples */
			perf_buffer_lost_fn lost_cb;
			/* ctx is provided to sample_cb and lost_cb */
			void *ctx;
		};
	};
	size_t sz;
};
#define perf_buffer_opts__last_field sz

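After this cleanup only the sz field survives and the callbacks travel as explicit arguments. A hedged sketch of the remaining perf_buffer__new() form (callback bodies and page count are illustrative):

#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* consume one event record */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	/* cnt samples were dropped on this CPU */
}

struct perf_buffer *open_events(int map_fd)
{
	/* 64 pages per CPU ring; NULL opts selects defaults */
	return perf_buffer__new(map_fd, 64, on_sample, on_lost, NULL, NULL);
}
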
@@ -1336,21 +1012,6 @@ perf_buffer__new(int map_fd, size_t page_cnt,
		 perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
		 const struct perf_buffer_opts *opts);

LIBBPF_API struct perf_buffer *
perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
			perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
			const struct perf_buffer_opts *opts);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() instead")
struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
						const struct perf_buffer_opts *opts);

#define perf_buffer__new(...) ___libbpf_overload(___perf_buffer_new, __VA_ARGS__)
#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \
	perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts)
#define ___perf_buffer_new3(map_fd, page_cnt, opts) \
	perf_buffer__new_deprecated(map_fd, page_cnt, opts)

enum bpf_perf_event_ret {
	LIBBPF_PERF_EVENT_DONE = 0,
	LIBBPF_PERF_EVENT_ERROR = -1,
@@ -1364,21 +1025,9 @@ typedef enum bpf_perf_event_ret

/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
	union {
		struct {
			size_t sz;
			long :0;
			long :0;
		};
		struct { /* DEPRECATED: will be removed in v1.0 */
			/* perf event attrs passed directly into perf_event_open() */
			struct perf_event_attr *attr;
			/* raw event callback */
			perf_buffer_event_fn event_cb;
			/* ctx is provided to event_cb */
			void *ctx;
		};
	};
	size_t sz;
	long :0;
	long :0;
	/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
	 * max_entries of given PERF_EVENT_ARRAY map)
	 */
@@ -1390,26 +1039,13 @@ struct perf_buffer_raw_opts {
};
#define perf_buffer_raw_opts__last_field map_keys

struct perf_event_attr;

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
		     perf_buffer_event_fn event_cb, void *ctx,
		     const struct perf_buffer_raw_opts *opts);

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
			    perf_buffer_event_fn event_cb, void *ctx,
			    const struct perf_buffer_raw_opts *opts);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead")
struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
						    const struct perf_buffer_raw_opts *opts);

#define perf_buffer__new_raw(...) ___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__)
#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \
	perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts)
#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \
	perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts)

LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
@@ -1418,15 +1054,6 @@ LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_id
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);

typedef enum bpf_perf_event_ret
(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
			  void *private_data);
LIBBPF_DEPRECATED_SINCE(0, 8, "use perf_buffer__poll() or perf_buffer__consume() instead")
LIBBPF_API enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data);

struct bpf_prog_linfo;
struct bpf_prog_info;

@@ -1448,14 +1075,6 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_prog_type() instead")
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_map_type() instead")
LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_helper() instead")
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_DEPRECATED_SINCE(0, 8, "implement your own or use bpftool for feature detection")
LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);

/**
 * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
@@ -1499,72 +1118,6 @@ LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
				       enum bpf_func_id helper_id, const void *opts);

/*
 * Get bpf_prog_info in continuous memory
 *
 * struct bpf_prog_info has multiple arrays. The user has the option to choose
 * arrays to fetch from the kernel. The following APIs provide a uniform way to
 * fetch this data. All arrays in bpf_prog_info are stored in a single
 * continuous memory region. This makes it easy to store the info in a
 * file.
 *
 * Before writing bpf_prog_info_linear to files, it is necessary to
 * translate pointers in bpf_prog_info to offsets. Helper functions
 * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
 * are introduced to switch between pointers and offsets.
 *
 * Examples:
 *   # To fetch map_ids and prog_tags:
 *   __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
 *		   (1UL << BPF_PROG_INFO_PROG_TAGS);
 *   struct bpf_prog_info_linear *info_linear =
 *	   bpf_program__get_prog_info_linear(fd, arrays);
 *
 *   # To save data in file
 *   bpf_program__bpil_addr_to_offs(info_linear);
 *   write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
 *
 *   # To read data from file
 *   read(f, info_linear, <proper_size>);
 *   bpf_program__bpil_offs_to_addr(info_linear);
 */
enum bpf_prog_info_array {
	BPF_PROG_INFO_FIRST_ARRAY = 0,
	BPF_PROG_INFO_JITED_INSNS = 0,
	BPF_PROG_INFO_XLATED_INSNS,
	BPF_PROG_INFO_MAP_IDS,
	BPF_PROG_INFO_JITED_KSYMS,
	BPF_PROG_INFO_JITED_FUNC_LENS,
	BPF_PROG_INFO_FUNC_INFO,
	BPF_PROG_INFO_LINE_INFO,
	BPF_PROG_INFO_JITED_LINE_INFO,
	BPF_PROG_INFO_PROG_TAGS,
	BPF_PROG_INFO_LAST_ARRAY,
};

struct bpf_prog_info_linear {
	/* size of struct bpf_prog_info, when the tool is compiled */
	__u32 info_len;
	/* total bytes allocated for data, rounded up to 8 bytes */
	__u32 data_len;
	/* which arrays are included in data */
	__u64 arrays;
	struct bpf_prog_info info;
	__u8 data[];
};

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays);

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);

LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);

/**
 * @brief **libbpf_num_possible_cpus()** is a helper function to get the
 * number of possible CPUs that the host kernel supports and expects.

@@ -1,29 +1,14 @@
LIBBPF_0.0.1 {
	global:
		bpf_btf_get_fd_by_id;
		bpf_create_map;
		bpf_create_map_in_map;
		bpf_create_map_in_map_node;
		bpf_create_map_name;
		bpf_create_map_node;
		bpf_create_map_xattr;
		bpf_load_btf;
		bpf_load_program;
		bpf_load_program_xattr;
		bpf_map__btf_key_type_id;
		bpf_map__btf_value_type_id;
		bpf_map__def;
		bpf_map__fd;
		bpf_map__is_offload_neutral;
		bpf_map__name;
		bpf_map__next;
		bpf_map__pin;
		bpf_map__prev;
		bpf_map__priv;
		bpf_map__reuse_fd;
		bpf_map__set_ifindex;
		bpf_map__set_inner_map_fd;
		bpf_map__set_priv;
		bpf_map__unpin;
		bpf_map_delete_elem;
		bpf_map_get_fd_by_id;
@@ -38,79 +23,37 @@ LIBBPF_0.0.1 {
		bpf_object__btf_fd;
		bpf_object__close;
		bpf_object__find_map_by_name;
		bpf_object__find_map_by_offset;
		bpf_object__find_program_by_title;
		bpf_object__kversion;
		bpf_object__load;
		bpf_object__name;
		bpf_object__next;
		bpf_object__open;
		bpf_object__open_buffer;
		bpf_object__open_xattr;
		bpf_object__pin;
		bpf_object__pin_maps;
		bpf_object__pin_programs;
		bpf_object__priv;
		bpf_object__set_priv;
		bpf_object__unload;
		bpf_object__unpin_maps;
		bpf_object__unpin_programs;
		bpf_perf_event_read_simple;
		bpf_prog_attach;
		bpf_prog_detach;
		bpf_prog_detach2;
		bpf_prog_get_fd_by_id;
		bpf_prog_get_next_id;
		bpf_prog_load;
		bpf_prog_load_xattr;
		bpf_prog_query;
		bpf_prog_test_run;
		bpf_prog_test_run_xattr;
		bpf_program__fd;
		bpf_program__is_kprobe;
		bpf_program__is_perf_event;
		bpf_program__is_raw_tracepoint;
		bpf_program__is_sched_act;
		bpf_program__is_sched_cls;
		bpf_program__is_socket_filter;
		bpf_program__is_tracepoint;
		bpf_program__is_xdp;
		bpf_program__load;
		bpf_program__next;
		bpf_program__nth_fd;
		bpf_program__pin;
		bpf_program__pin_instance;
		bpf_program__prev;
		bpf_program__priv;
		bpf_program__set_expected_attach_type;
		bpf_program__set_ifindex;
		bpf_program__set_kprobe;
		bpf_program__set_perf_event;
		bpf_program__set_prep;
		bpf_program__set_priv;
		bpf_program__set_raw_tracepoint;
		bpf_program__set_sched_act;
		bpf_program__set_sched_cls;
		bpf_program__set_socket_filter;
		bpf_program__set_tracepoint;
		bpf_program__set_type;
		bpf_program__set_xdp;
		bpf_program__title;
		bpf_program__unload;
		bpf_program__unpin;
		bpf_program__unpin_instance;
		bpf_prog_linfo__free;
		bpf_prog_linfo__new;
		bpf_prog_linfo__lfind_addr_func;
		bpf_prog_linfo__lfind;
		bpf_raw_tracepoint_open;
		bpf_set_link_xdp_fd;
		bpf_task_fd_query;
		bpf_verify_program;
		btf__fd;
		btf__find_by_name;
		btf__free;
		btf__get_from_id;
		btf__name_by_offset;
		btf__new;
		btf__resolve_size;
@@ -127,48 +70,24 @@ LIBBPF_0.0.1 {

LIBBPF_0.0.2 {
	global:
		bpf_probe_helper;
		bpf_probe_map_type;
		bpf_probe_prog_type;
		bpf_map__resize;
		bpf_map_lookup_elem_flags;
		bpf_object__btf;
		bpf_object__find_map_fd_by_name;
		bpf_get_link_xdp_id;
		btf__dedup;
		btf__get_map_kv_tids;
		btf__get_nr_types;
		btf__get_raw_data;
		btf__load;
		btf_ext__free;
		btf_ext__func_info_rec_size;
		btf_ext__get_raw_data;
		btf_ext__line_info_rec_size;
		btf_ext__new;
		btf_ext__reloc_func_info;
		btf_ext__reloc_line_info;
		xsk_umem__create;
		xsk_socket__create;
		xsk_umem__delete;
		xsk_socket__delete;
		xsk_umem__fd;
		xsk_socket__fd;
		bpf_program__get_prog_info_linear;
		bpf_program__bpil_addr_to_offs;
		bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;

LIBBPF_0.0.3 {
	global:
		bpf_map__is_internal;
		bpf_map_freeze;
		btf__finalize_data;
} LIBBPF_0.0.2;

LIBBPF_0.0.4 {
	global:
		bpf_link__destroy;
		bpf_object__load_xattr;
		bpf_program__attach_kprobe;
		bpf_program__attach_perf_event;
		bpf_program__attach_raw_tracepoint;
@@ -176,14 +95,10 @@ LIBBPF_0.0.4 {
		bpf_program__attach_uprobe;
		btf_dump__dump_type;
		btf_dump__free;
		btf_dump__new;
		btf__parse_elf;
		libbpf_num_possible_cpus;
		perf_buffer__free;
		perf_buffer__new;
		perf_buffer__new_raw;
		perf_buffer__poll;
		xsk_umem__create;
} LIBBPF_0.0.3;

LIBBPF_0.0.5 {
@@ -193,7 +108,6 @@ LIBBPF_0.0.5 {

LIBBPF_0.0.6 {
	global:
		bpf_get_link_xdp_info;
		bpf_map__get_pin_path;
		bpf_map__is_pinned;
		bpf_map__set_pin_path;
@@ -202,9 +116,6 @@ LIBBPF_0.0.6 {
		bpf_program__attach_trace;
		bpf_program__get_expected_attach_type;
		bpf_program__get_type;
		bpf_program__is_tracing;
		bpf_program__set_tracing;
		bpf_program__size;
		btf__find_by_name_kind;
		libbpf_find_vmlinux_btf_id;
} LIBBPF_0.0.5;
@@ -224,14 +135,8 @@ LIBBPF_0.0.7 {
		bpf_object__detach_skeleton;
		bpf_object__load_skeleton;
		bpf_object__open_skeleton;
		bpf_probe_large_insn_limit;
		bpf_prog_attach_xattr;
		bpf_program__attach;
		bpf_program__name;
		bpf_program__is_extension;
		bpf_program__is_struct_ops;
		bpf_program__set_extension;
		bpf_program__set_struct_ops;
		btf__align_of;
		libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
@@ -250,10 +155,7 @@ LIBBPF_0.0.8 {
		bpf_prog_attach_opts;
		bpf_program__attach_cgroup;
		bpf_program__attach_lsm;
		bpf_program__is_lsm;
		bpf_program__set_attach_target;
		bpf_program__set_lsm;
		bpf_set_link_xdp_fd_opts;
} LIBBPF_0.0.7;

LIBBPF_0.0.9 {
@@ -291,9 +193,7 @@ LIBBPF_0.1.0 {
		bpf_map__value_size;
		bpf_program__attach_xdp;
		bpf_program__autoload;
		bpf_program__is_sk_lookup;
		bpf_program__set_autoload;
		bpf_program__set_sk_lookup;
		btf__parse;
		btf__parse_raw;
		btf__pointer_size;
@@ -336,7 +236,6 @@ LIBBPF_0.2.0 {
		perf_buffer__buffer_fd;
		perf_buffer__epoll_fd;
		perf_buffer__consume_buffer;
		xsk_socket__create_shared;
} LIBBPF_0.1.0;

LIBBPF_0.3.0 {
@@ -348,8 +247,6 @@ LIBBPF_0.3.0 {
		btf__new_empty_split;
		btf__new_split;
		ring_buffer__epoll_fd;
		xsk_setup_xdp_prog;
		xsk_socket__update_xskmap;
} LIBBPF_0.2.0;

LIBBPF_0.4.0 {
@@ -397,7 +294,6 @@ LIBBPF_0.6.0 {
		bpf_object__next_program;
		bpf_object__prev_map;
		bpf_object__prev_program;
		bpf_prog_load_deprecated;
		bpf_prog_load;
		bpf_program__flags;
		bpf_program__insn_cnt;
@@ -407,18 +303,14 @@ LIBBPF_0.6.0 {
		btf__add_decl_tag;
		btf__add_type_tag;
		btf__dedup;
		btf__dedup_deprecated;
		btf__raw_data;
		btf__type_cnt;
		btf_dump__new;
		btf_dump__new_deprecated;
		libbpf_major_version;
		libbpf_minor_version;
		libbpf_version_string;
		perf_buffer__new;
		perf_buffer__new_deprecated;
		perf_buffer__new_raw;
		perf_buffer__new_raw_deprecated;
} LIBBPF_0.5.0;

LIBBPF_0.7.0 {
@@ -434,10 +326,11 @@ LIBBPF_0.7.0 {
		bpf_xdp_detach;
		bpf_xdp_query;
		bpf_xdp_query_id;
		btf_ext__raw_data;
		libbpf_probe_bpf_helper;
		libbpf_probe_bpf_map_type;
		libbpf_probe_bpf_prog_type;
		libbpf_set_memlock_rlim_max;
		libbpf_set_memlock_rlim;
} LIBBPF_0.6.0;

LIBBPF_0.8.0 {
@@ -462,12 +355,11 @@ LIBBPF_0.8.0 {

LIBBPF_1.0.0 {
	global:
		bpf_prog_query_opts;
		btf__add_enum64;
		btf__add_enum64_value;
		libbpf_bpf_attach_type_str;
		libbpf_bpf_link_type_str;
		libbpf_bpf_map_type_str;
		libbpf_bpf_prog_type_str;

	local: *;
};

@@ -30,20 +30,10 @@
/* Add checks for other versions below when planning deprecation of API symbols
 * with the LIBBPF_DEPRECATED_SINCE macro.
 */
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 6)
#define __LIBBPF_MARK_DEPRECATED_0_6(X) X
#if __LIBBPF_CURRENT_VERSION_GEQ(1, 0)
#define __LIBBPF_MARK_DEPRECATED_1_0(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_6(X)
#endif
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 7)
#define __LIBBPF_MARK_DEPRECATED_0_7(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_7(X)
#endif
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 8)
#define __LIBBPF_MARK_DEPRECATED_0_8(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_8(X)
#define __LIBBPF_MARK_DEPRECATED_1_0(X)
#endif

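To see what these version-gated markers buy, here is a stand-alone sketch of the same pattern with invented names (not libbpf's actual macros):

#include <stdio.h>

/* pretend the library being built is v1.2 */
#define MY_LIB_VERSION_GEQ(major, minor) \
	((1 << 8 | 2) >= ((major) << 8 | (minor)))

#if MY_LIB_VERSION_GEQ(0, 7)
#define MY_MARK_DEPRECATED_0_7(X) X	/* emit the attribute */
#else
#define MY_MARK_DEPRECATED_0_7(X)	/* swallow it on older versions */
#endif

MY_MARK_DEPRECATED_0_7(__attribute__((deprecated("use new_api() instead"))))
int old_api(void);

int new_api(void) { return 0; }
int old_api(void) { return new_api(); }

int main(void)
{
	return old_api();	/* warns under -Wdeprecated-declarations */
}
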
/* This set of internal macros allows to do "function overloading" based on

@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
#include "libbpf_legacy.h"
#include "relo_core.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
@@ -478,8 +477,6 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind);

extern enum libbpf_strict_mode libbpf_mode;

typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
			     const char *sym_name, void *ctx);

@@ -498,12 +495,8 @@ static inline int libbpf_err(int ret)
 */
static inline int libbpf_err_errno(int ret)
{
	if (libbpf_mode & LIBBPF_STRICT_DIRECT_ERRS)
		/* errno is already assumed to be set on error */
		return ret < 0 ? -errno : ret;

	/* legacy: on error return -1 directly and don't touch errno */
	return ret;
	/* errno is already assumed to be set on error */
	return ret < 0 ? -errno : ret;
}

/* handle error for pointer-returning APIs, err is assumed to be < 0 always */
@@ -511,12 +504,7 @@ static inline void *libbpf_err_ptr(int err)
{
	/* set errno on error, this doesn't break anything */
	errno = -err;

	if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
		return NULL;

	/* legacy: encode err as ptr */
	return ERR_PTR(err);
	return NULL;
}

/* handle pointer-returning APIs' error handling */
@@ -526,11 +514,7 @@ static inline void *libbpf_ptr(void *ret)
	if (IS_ERR(ret))
		errno = -PTR_ERR(ret);

	if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
		return IS_ERR(ret) ? NULL : ret;

	/* legacy: pass-through original pointer */
	return ret;
	return IS_ERR(ret) ? NULL : ret;
}

static inline bool str_is_empty(const char *s)

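Stripped of the strict-mode branches, the error convention these helpers now enforce is simple: set errno and return NULL (or a negative error for int-returning APIs). A free-standing miniature of the pointer case, with invented names:

#include <errno.h>
#include <stdlib.h>

/* On failure: set errno and return NULL; callers check the pointer
 * and read errno directly.
 */
static void *err_ptr(int err)
{
	errno = -err;
	return NULL;
}

struct thing { int val; };

struct thing *thing_new(int param)
{
	struct thing *t;

	if (param < 0)
		return err_ptr(-EINVAL);
	t = calloc(1, sizeof(*t));
	if (!t)
		return err_ptr(-ENOMEM);
	t->val = param;
	return t;
}
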
@@ -20,6 +20,11 @@
extern "C" {
#endif

/* As of libbpf 1.0 libbpf_set_strict_mode() and enum libbpf_strict_mode have
 * no effect. But they are left in libbpf_legacy.h so that applications that
 * prepared for libbpf 1.0 before the final release by using
 * libbpf_set_strict_mode() still work with libbpf 1.0+ without any changes.
 */
enum libbpf_strict_mode {
	/* Turn on all supported strict features of libbpf to simulate libbpf
	 * v1.0 behavior.
@@ -71,8 +76,8 @@ enum libbpf_strict_mode {
	 * first BPF program or map creation operation. This is done only if
	 * kernel is too old to support memcg-based memory accounting for BPF
	 * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
	 * but it can be overridden with libbpf_set_memlock_rlim_max() API.
	 * Note that libbpf_set_memlock_rlim_max() needs to be called before
	 * but it can be overridden with libbpf_set_memlock_rlim() API.
	 * Note that libbpf_set_memlock_rlim() needs to be called before
	 * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
	 * operation.
	 */
@@ -88,6 +93,25 @@ enum libbpf_strict_mode {

LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);

/**
 * @brief **libbpf_get_error()** extracts the error code from the passed
 * pointer
 * @param ptr pointer returned from libbpf API function
 * @return error code; or 0 if no error occurred
 *
 * Note, as of libbpf 1.0 this function is not necessary and not recommended
 * to be used. Libbpf doesn't return error code embedded into the pointer
 * itself. Instead, NULL is returned on error and the error code is passed
 * through the thread-local errno variable. **libbpf_get_error()** just returns
 * the -errno value if it receives NULL, which is correct only if errno hasn't
 * been modified between the libbpf API call and the corresponding
 * **libbpf_get_error()** call. Prefer to check the return for NULL and use
 * errno directly.
 *
 * This API is left in libbpf 1.0 to allow applications that were 1.0-ready
 * before the final libbpf 1.0 release to keep working without changes.
 */
LIBBPF_API long libbpf_get_error(const void *ptr);

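A call-site sketch of the recommended post-1.0 style (the path is the standard kernel BTF location; error handling trimmed):

#include <errno.h>
#include <bpf/btf.h>

long load_kernel_btf(struct btf **out)
{
	struct btf *btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);

	if (!btf)
		return -errno;	/* preferred: NULL check plus errno */
	/* legacy equivalent: long err = libbpf_get_error(btf); */
	*out = btf;
	return 0;
}
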
#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS

/* "Discouraged" APIs which don't follow consistent libbpf naming patterns.

@@ -17,47 +17,14 @@
#include "libbpf.h"
#include "libbpf_internal.h"

static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}

static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}

static int probe_prog_load(enum bpf_prog_type prog_type,
			   const struct bpf_insn *insns, size_t insns_cnt,
			   char *log_buf, size_t log_buf_sz,
			   __u32 ifindex)
			   char *log_buf, size_t log_buf_sz)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_buf_sz,
		.log_level = log_buf ? 1 : 0,
		.prog_ifindex = ifindex,
	);
	int fd, err, exp_err = 0;
	const char *exp_msg = NULL;
@@ -161,31 +128,10 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0, 0);
	ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0);
	return libbpf_err(ret);
}

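Callers migrating off the removed bpf_probe_prog_type() use the tri-state API above. A hypothetical probe:

#include <bpf/libbpf.h>

int kernel_supports_xdp(void)
{
	/* 1 = supported, 0 = not supported, <0 = probing failed */
	return libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL) == 1;
}
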
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	/* prefer libbpf_probe_bpf_prog_type() unless offload is requested */
	if (ifindex == 0)
		return libbpf_probe_bpf_prog_type(prog_type, NULL) == 1;

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}

int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
@@ -242,15 +188,13 @@ static int load_local_storage_btf(void)
				     strs, sizeof(strs));
}

static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
static int probe_map_create(enum bpf_map_type map_type)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int key_size, value_size, max_entries;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err;

	opts.map_ifindex = ifindex;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;
@@ -326,12 +270,6 @@ static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			goto cleanup;

		fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					  sizeof(__u32), sizeof(__u32), 1, NULL);
		if (fd_inner < 0)
@@ -370,15 +308,10 @@ int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_map_create(map_type, 0);
	ret = probe_map_create(map_type);
	return libbpf_err(ret);
}

bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	return probe_map_create(map_type, ifindex) == 1;
}

int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
			    const void *opts)
{
@@ -407,7 +340,7 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
	}

	buf[0] = '\0';
	ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf), 0);
	ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf));
	if (ret < 0)
		return libbpf_err(ret);

@@ -427,51 +360,3 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
		return 0;
	return 1; /* assume supported */
}

bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
				!grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}

/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
bool bpf_probe_large_insn_limit(__u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
			ifindex);

	return errno != E2BIG && errno != EINVAL;
}

@@ -27,6 +27,14 @@ typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
			      void *cookie);

struct xdp_link_info {
	__u32 prog_id;
	__u32 drv_prog_id;
	__u32 hw_prog_id;
	__u32 skb_prog_id;
	__u8 attach_mode;
};

struct xdp_id_md {
	int ifindex;
	__u32 flags;
@@ -288,31 +296,6 @@ int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *o
	return bpf_xdp_attach(ifindex, -1, flags, opts);
}

int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
			     const struct bpf_xdp_set_link_opts *opts)
{
	int old_fd = -1, ret;

	if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
		return libbpf_err(-EINVAL);

	if (OPTS_HAS(opts, old_fd)) {
		old_fd = OPTS_GET(opts, old_fd, -1);
		flags |= XDP_FLAGS_REPLACE;
	}

	ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags);
	return libbpf_err(ret);
}

int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
	int ret;

	ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
	return libbpf_err(ret);
}

static int __dump_link_nlmsg(struct nlmsghdr *nlh,
			     libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
@@ -413,30 +396,6 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
	return 0;
}

int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
			  size_t info_size, __u32 flags)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	size_t sz;
	int err;

	if (!info_size)
		return libbpf_err(-EINVAL);

	err = bpf_xdp_query(ifindex, flags, &opts);
	if (err)
		return libbpf_err(err);

	/* struct xdp_link_info field layout matches struct bpf_xdp_query_opts
	 * layout after sz field
	 */
	sz = min(info_size, offsetofend(struct xdp_link_info, attach_mode));
	memcpy(info, &opts.prog_id, sz);
	memset((void *)info + sz, 0, info_size - sz);

	return 0;
}

int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
@@ -463,11 +422,6 @@ int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
}

int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
{
	return bpf_xdp_query_id(ifindex, flags, prog_id);
}

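The removed wrappers map directly onto the bpf_xdp_*() API; a hedged before/after sketch (the flags choice is illustrative):

#include <linux/if_link.h>	/* XDP_FLAGS_SKB_MODE */
#include <bpf/libbpf.h>

int swap_xdp_prog(int ifindex, int prog_fd)
{
	__u32 id = 0;
	int err;

	/* was: bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_SKB_MODE) */
	err = bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
	if (err)
		return err;

	/* was: bpf_get_link_xdp_id(ifindex, &id, XDP_FLAGS_SKB_MODE) */
	return bpf_xdp_query_id(ifindex, XDP_FLAGS_SKB_MODE, &id);
}
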
typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);

static int clsact_config(struct libbpf_nla_req *req)

@@ -95,6 +95,7 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
	case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
	case BPF_CORE_TYPE_EXISTS: return "type_exists";
	case BPF_CORE_TYPE_MATCHES: return "type_matches";
	case BPF_CORE_TYPE_SIZE: return "type_size";
	case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
@@ -123,6 +124,7 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
	case BPF_CORE_TYPE_ID_LOCAL:
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_MATCHES:
	case BPF_CORE_TYPE_SIZE:
		return true;
	default:
@@ -141,6 +143,86 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
	}
}

int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
				const struct btf *targ_btf, __u32 targ_id, int level)
{
	const struct btf_type *local_type, *targ_type;
	int depth = 32; /* max recursion depth */

	/* caller made sure that names match (ignoring flavor suffix) */
	local_type = btf_type_by_id(local_btf, local_id);
	targ_type = btf_type_by_id(targ_btf, targ_id);
	if (!btf_kind_core_compat(local_type, targ_type))
		return 0;

recur:
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (!btf_kind_core_compat(local_type, targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM64:
		return 1;
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
	case BTF_KIND_PTR:
		local_id = local_type->type;
		targ_id = targ_type->type;
		goto recur;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_type);
		struct btf_param *targ_p = btf_params(targ_type);
		__u16 local_vlen = btf_vlen(local_type);
		__u16 targ_vlen = btf_vlen(targ_type);
		int i, err;

		if (local_vlen != targ_vlen)
			return 0;

		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			if (level <= 0)
				return -EINVAL;

			skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
			skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
			err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
							  level - 1);
			if (err <= 0)
				return err;
		}

		/* tail recurse for return type check */
		skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
		skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
		goto recur;
	}
	default:
		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
			btf_kind_str(local_type), local_id, targ_id);
		return 0;
	}
}

/*
|
||||
* Turn bpf_core_relo into a low- and high-level spec representation,
|
||||
* validating correctness along the way, as well as calculating resulting
|
||||
|
@ -171,7 +253,7 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
|
|||
* - field 'a' access (corresponds to '2' in low-level spec);
|
||||
* - array element #3 access (corresponds to '3' in low-level spec).
|
||||
*
|
||||
* Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
|
||||
* Type-based relocations (TYPE_EXISTS/TYPE_MATCHES/TYPE_SIZE,
|
||||
* TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
|
||||
* spec and raw_spec are kept empty.
|
||||
*
|
||||
|
@ -488,9 +570,14 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
|||
targ_spec->relo_kind = local_spec->relo_kind;
|
||||
|
||||
if (core_relo_is_type_based(local_spec->relo_kind)) {
|
||||
return bpf_core_types_are_compat(local_spec->btf,
|
||||
local_spec->root_type_id,
|
||||
targ_btf, targ_id);
|
||||
if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES)
|
||||
return bpf_core_types_match(local_spec->btf,
|
||||
local_spec->root_type_id,
|
||||
targ_btf, targ_id);
|
||||
else
|
||||
return bpf_core_types_are_compat(local_spec->btf,
|
||||
local_spec->root_type_id,
|
||||
targ_btf, targ_id);
|
||||
}
|
||||
|
||||
local_acc = &local_spec->spec[0];
|
||||
|
@ -739,6 +826,7 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
|||
*validate = false;
|
||||
break;
|
||||
case BPF_CORE_TYPE_EXISTS:
|
||||
case BPF_CORE_TYPE_MATCHES:
|
||||
*val = 1;
|
||||
break;
|
||||
case BPF_CORE_TYPE_SIZE:
|
||||
|
@ -1330,3 +1418,273 @@ int bpf_core_calc_relo_insn(const char *prog_name,
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off,
|
||||
const struct btf *targ_btf, size_t targ_name_off)
|
||||
{
|
||||
const char *local_n, *targ_n;
|
||||
size_t local_len, targ_len;
|
||||
|
||||
local_n = btf__name_by_offset(local_btf, local_name_off);
|
||||
targ_n = btf__name_by_offset(targ_btf, targ_name_off);
|
||||
|
||||
if (str_is_empty(targ_n))
|
||||
return str_is_empty(local_n);
|
||||
|
||||
targ_len = bpf_core_essential_name_len(targ_n);
|
||||
local_len = bpf_core_essential_name_len(local_n);
|
||||
|
||||
return targ_len == local_len && strncmp(local_n, targ_n, local_len) == 0;
|
||||
}
|
||||
|
||||
static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t,
|
||||
const struct btf *targ_btf, const struct btf_type *targ_t)
|
||||
{
|
||||
__u16 local_vlen = btf_vlen(local_t);
|
||||
__u16 targ_vlen = btf_vlen(targ_t);
|
||||
int i, j;
|
||||
|
||||
if (local_t->size != targ_t->size)
|
||||
return 0;
|
||||
|
||||
if (local_vlen > targ_vlen)
|
||||
return 0;
|
||||
|
||||
/* iterate over the local enum's variants and make sure each has
|
||||
* a symbolic name correspondent in the target
|
||||
*/
|
||||
for (i = 0; i < local_vlen; i++) {
|
||||
bool matched = false;
|
||||
__u32 local_n_off, targ_n_off;
|
||||
|
||||
local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off :
|
||||
btf_enum64(local_t)[i].name_off;
|
||||
|
||||
for (j = 0; j < targ_vlen; j++) {
|
||||
targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off :
|
||||
btf_enum64(targ_t)[j].name_off;
|
||||
|
||||
if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) {
|
||||
matched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!matched)
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t,
|
||||
const struct btf *targ_btf, const struct btf_type *targ_t,
|
||||
bool behind_ptr, int level)
|
||||
{
|
||||
const struct btf_member *local_m = btf_members(local_t);
|
||||
__u16 local_vlen = btf_vlen(local_t);
|
||||
__u16 targ_vlen = btf_vlen(targ_t);
|
||||
int i, j, err;
|
||||
|
||||
if (local_vlen > targ_vlen)
|
||||
return 0;
|
||||
|
||||
/* check that all local members have a match in the target */
|
||||
for (i = 0; i < local_vlen; i++, local_m++) {
|
||||
const struct btf_member *targ_m = btf_members(targ_t);
|
||||
bool matched = false;
|
||||
|
||||
for (j = 0; j < targ_vlen; j++, targ_m++) {
|
||||
if (!bpf_core_names_match(local_btf, local_m->name_off,
|
||||
targ_btf, targ_m->name_off))
|
||||
continue;
|
||||
|
||||
err = __bpf_core_types_match(local_btf, local_m->type, targ_btf,
|
||||
targ_m->type, behind_ptr, level - 1);
|
||||
if (err < 0)
|
||||
return err;
|
||||
if (err > 0) {
|
||||
matched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!matched)
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Check that two types "match". This function assumes that root types were
|
||||
* already checked for name match.
|
||||
*
|
||||
* The matching relation is defined as follows:
|
||||
* - modifiers and typedefs are stripped (and, hence, effectively ignored)
|
||||
* - generally speaking types need to be of same kind (struct vs. struct, union
|
||||
* vs. union, etc.)
|
||||
* - exceptions are struct/union behind a pointer which could also match a
|
||||
* forward declaration of a struct or union, respectively, and enum vs.
|
||||
* enum64 (see below)
|
||||
* Then, depending on type:
|
||||
* - integers:
|
||||
* - match if size and signedness match
|
||||
* - arrays & pointers:
|
||||
* - target types are recursively matched
|
||||
* - structs & unions:
|
||||
* - local members need to exist in target with the same name
|
||||
* - for each member we recursively check match unless it is already behind a
|
||||
* pointer, in which case we only check matching names and compatible kind
|
||||
* - enums:
|
||||
* - local variants have to have a match in target by symbolic name (but not
|
||||
* numeric value)
|
||||
* - size has to match (but enum may match enum64 and vice versa)
|
||||
* - function pointers:
|
||||
* - number and position of arguments in local type has to match target
|
||||
* - for each argument and the return value we recursively check match
|
||||
*/
|
||||
int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
|
||||
__u32 targ_id, bool behind_ptr, int level)
|
||||
{
|
||||
const struct btf_type *local_t, *targ_t;
|
||||
int depth = 32; /* max recursion depth */
|
||||
__u16 local_k, targ_k;
|
||||
|
||||
if (level <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
local_t = btf_type_by_id(local_btf, local_id);
|
||||
targ_t = btf_type_by_id(targ_btf, targ_id);
|
||||
|
||||
recur:
|
||||
depth--;
|
||||
if (depth < 0)
|
||||
return -EINVAL;
|
||||
|
||||
local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id);
|
||||
targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
|
||||
if (!local_t || !targ_t)
|
||||
return -EINVAL;
|
||||
|
||||
/* While the name check happens after typedefs are skipped, root-level
|
||||
* typedefs would still be name-matched as that's the contract with
|
||||
* callers.
|
||||
*/
|
||||
if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off))
|
||||
return 0;
|
||||
|
||||
local_k = btf_kind(local_t);
|
||||
targ_k = btf_kind(targ_t);
|
||||
|
||||
switch (local_k) {
|
||||
case BTF_KIND_UNKN:
|
||||
return local_k == targ_k;
|
||||
case BTF_KIND_FWD: {
|
||||
bool local_f = BTF_INFO_KFLAG(local_t->info);
|
||||
|
||||
if (behind_ptr) {
|
||||
if (local_k == targ_k)
|
||||
return local_f == BTF_INFO_KFLAG(targ_t->info);
|
||||
|
||||
/* for forward declarations kflag dictates whether the
|
||||
* target is a struct (0) or union (1)
|
||||
*/
|
||||
return (targ_k == BTF_KIND_STRUCT && !local_f) ||
|
||||
(targ_k == BTF_KIND_UNION && local_f);
|
||||
} else {
|
||||
if (local_k != targ_k)
|
||||
return 0;
|
||||
|
||||
/* match if the forward declaration is for the same kind */
|
||||
return local_f == BTF_INFO_KFLAG(targ_t->info);
|
||||
}
|
||||
}
|
||||
case BTF_KIND_ENUM:
|
||||
case BTF_KIND_ENUM64:
|
||||
if (!btf_is_any_enum(targ_t))
|
||||
return 0;
|
||||
|
||||
return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t);
|
||||
case BTF_KIND_STRUCT:
|
||||
case BTF_KIND_UNION:
|
||||
if (behind_ptr) {
|
||||
bool targ_f = BTF_INFO_KFLAG(targ_t->info);
|
||||
|
||||
if (local_k == targ_k)
|
||||
return 1;
|
||||
|
||||
if (targ_k != BTF_KIND_FWD)
|
||||
return 0;
|
||||
|
||||
return (local_k == BTF_KIND_UNION) == targ_f;
|
||||
} else {
|
||||
if (local_k != targ_k)
|
||||
return 0;
|
||||
|
||||
return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t,
|
||||
behind_ptr, level);
|
||||
}
|
||||
case BTF_KIND_INT: {
|
||||
__u8 local_sgn;
|
||||
__u8 targ_sgn;
|
||||
|
||||
if (local_k != targ_k)
|
||||
return 0;
|
||||
|
||||
local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED;
|
||||
targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED;
|
||||
|
||||
return local_t->size == targ_t->size && local_sgn == targ_sgn;
|
||||
}
|
||||
case BTF_KIND_PTR:
|
||||
if (local_k != targ_k)
|
||||
return 0;
|
||||
|
||||
behind_ptr = true;
|
||||
|
||||
local_id = local_t->type;
|
||||
targ_id = targ_t->type;
|
||||
goto recur;
|
||||
case BTF_KIND_ARRAY: {
|
||||
const struct btf_array *local_array = btf_array(local_t);
|
||||
const struct btf_array *targ_array = btf_array(targ_t);
|
||||
|
||||
if (local_k != targ_k)
|
||||
return 0;
|
||||
|
||||
if (local_array->nelems != targ_array->nelems)
|
||||
return 0;
|
||||
|
||||
local_id = local_array->type;
|
||||
targ_id = targ_array->type;
|
||||
goto recur;
|
||||
}
|
||||
case BTF_KIND_FUNC_PROTO: {
|
||||
struct btf_param *local_p = btf_params(local_t);
|
||||
struct btf_param *targ_p = btf_params(targ_t);
|
||||
__u16 local_vlen = btf_vlen(local_t);
|
||||
__u16 targ_vlen = btf_vlen(targ_t);
|
||||
int i, err;
|
||||
|
||||
if (local_k != targ_k)
|
||||
return 0;
|
||||
|
||||
if (local_vlen != targ_vlen)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
|
||||
err = __bpf_core_types_match(local_btf, local_p->type, targ_btf,
|
||||
targ_p->type, behind_ptr, level - 1);
|
||||
if (err <= 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
/* tail recurse for return type check */
|
||||
local_id = local_t->type;
|
||||
targ_id = targ_t->type;
|
||||
goto recur;
|
||||
}
|
||||
default:
|
||||
pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
|
||||
btf_kind_str(local_t), local_id, targ_id);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
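To make the matching relation above concrete: a local struct matches a target that carries extra members, as long as every local member finds a name-and-type match. A minimal sketch (not part of the patch; error checks omitted, and note that __bpf_core_types_match() itself is internal, so this only illustrates the shape of the inputs) building the two BTFs with libbpf's public btf__add_*() API:

	#include <stdbool.h>
	#include <bpf/btf.h>

	/* local:  struct s { int a; };
	 * target: struct s { int a; int b; };  -- extra target member is fine,
	 * the reverse direction would not match
	 */
	static struct btf *make_btf(bool with_b)
	{
		struct btf *btf = btf__new_empty();
		int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);

		btf__add_struct(btf, "s", with_b ? 8 : 4);
		btf__add_field(btf, "a", int_id, 0, 0);
		if (with_b)
			btf__add_field(btf, "b", int_id, 32, 0);
		return btf;
	}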
@@ -68,8 +68,14 @@ struct bpf_core_relo_res {
	__u32 new_type_id;
};

int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
				const struct btf *targ_btf, __u32 targ_id, int level);
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id);
int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
			   __u32 targ_id, bool behind_ptr, int level);
int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
			 __u32 targ_id);

size_t bpf_core_essential_name_len(const char *name);
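The non-underscored declarations are thin entry points over the underscored workers. Their bodies are not in this hunk, so the following is an assumption about the call sites, but presumably they pin the extra parameters to their defaults, along these lines:

	int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
				 const struct btf *targ_btf, __u32 targ_id)
	{
		/* hypothetical wrapper: start outside any pointer, with a
		 * recursion budget mirroring the depth = 32 guard above
		 */
		return __bpf_core_types_match(local_btf, local_id, targ_btf,
					      targ_id, false, 32);
	}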
@@ -652,11 +652,9 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
		 *
		 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
		 */
		usdt_rel_ip = usdt_abs_ip = note.loc_addr;
		if (base_addr) {
		usdt_abs_ip = note.loc_addr;
		if (base_addr)
			usdt_abs_ip += base_addr - note.base_addr;
			usdt_rel_ip += base_addr - note.base_addr;
		}

		/* When attaching uprobes (which is what USDTs basically are)
		 * kernel expects file offset to be specified, not a relative
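The simplification above drops the now-unneeded usdt_rel_ip assignment and keeps only the absolute-IP rebasing. With made-up example values (assumed purely for illustration), the arithmetic works out as:

	/* illustrative numbers only */
	long loc_addr  = 0x401234;	/* note.loc_addr: IP recorded in the note */
	long note_base = 0x400000;	/* note.base_addr: base recorded in the note */
	long base_addr = 0x500000;	/* actual runtime .stapsdt.base address */

	/* shift the recorded IP by however far the base moved */
	long usdt_abs_ip = loc_addr + (base_addr - note_base);	/* 0x501234 */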
@@ -9,6 +9,7 @@
 #include <linux/bpf.h>
 #include <bpf/libbpf.h>
 #include <bpf/bpf.h>
 #include <linux/filter.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
@@ -49,6 +50,7 @@ struct bpf_prog_priv {
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
	int *prologue_fds;
};

struct bpf_perf_object {
@@ -56,6 +58,11 @@ struct bpf_perf_object {
	struct bpf_object *obj;
};

struct bpf_preproc_result {
	struct bpf_insn *new_insn_ptr;
	int new_insn_cnt;
};

static LIST_HEAD(bpf_objects_list);
static struct hashmap *bpf_program_hash;
static struct hashmap *bpf_map_hash;
@@ -86,6 +93,7 @@ bpf_perf_object__next(struct bpf_perf_object *prev)
	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))

static bool libbpf_initialized;
static int libbpf_sec_handler;

static int bpf_perf_object__add(struct bpf_object *obj)
{
@@ -99,12 +107,76 @@ static int bpf_perf_object__add(struct bpf_object *obj)
	return perf_obj ? 0 : -ENOMEM;
}

static void *program_priv(const struct bpf_program *prog)
{
	void *priv;

	if (IS_ERR_OR_NULL(bpf_program_hash))
		return NULL;
	if (!hashmap__find(bpf_program_hash, prog, &priv))
		return NULL;
	return priv;
}

static struct bpf_insn prologue_init_insn[] = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
};

static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
				       struct bpf_prog_load_opts *opts __maybe_unused,
				       long cookie __maybe_unused)
{
	size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
	size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
	struct bpf_prog_priv *priv = program_priv(prog);
	const struct bpf_insn *orig_insn;
	struct bpf_insn *insn;

	if (IS_ERR_OR_NULL(priv)) {
		pr_debug("bpf: failed to get private field\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (!priv->need_prologue)
		return 0;

	/* prepend initialization code to program instructions */
	orig_insn = bpf_program__insns(prog);
	orig_insn_cnt = bpf_program__insn_cnt(prog);
	init_size = init_size_cnt * sizeof(*insn);
	orig_size = orig_insn_cnt * sizeof(*insn);

	insn_cnt = orig_insn_cnt + init_size_cnt;
	insn = malloc(insn_cnt * sizeof(*insn));
	if (!insn)
		return -ENOMEM;

	memcpy(insn, prologue_init_insn, init_size);
	memcpy((char *) insn + init_size, orig_insn, orig_size);
	bpf_program__set_insns(prog, insn, insn_cnt);
	return 0;
}

static int libbpf_init(void)
{
	LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
		    .prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
	);

	if (libbpf_initialized)
		return 0;

	libbpf_set_print(libbpf_perf_print);
	libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
							  0, &handler_opts);
	if (libbpf_sec_handler < 0) {
		pr_debug("bpf: failed to register libbpf section handler: %d\n",
			 libbpf_sec_handler);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	libbpf_initialized = true;
	return 0;
}
@@ -188,14 +260,31 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
	return obj;
}

static void close_prologue_programs(struct bpf_prog_priv *priv)
{
	struct perf_probe_event *pev;
	int i, fd;

	if (!priv->need_prologue)
		return;
	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		fd = priv->prologue_fds[i];
		if (fd != -1)
			close(fd);
	}
}

static void
clear_prog_priv(const struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	close_prologue_programs(priv);
	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->prologue_fds);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
@@ -243,17 +332,6 @@ static bool ptr_equal(const void *key1, const void *key2,
	return key1 == key2;
}

static void *program_priv(const struct bpf_program *prog)
{
	void *priv;

	if (IS_ERR_OR_NULL(bpf_program_hash))
		return NULL;
	if (!hashmap__find(bpf_program_hash, prog, &priv))
		return NULL;
	return priv;
}

static int program_set_priv(struct bpf_program *prog, void *priv)
{
	void *old_priv;
@@ -558,8 +636,8 @@ static int bpf__prepare_probe(void)

static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
		     const struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_preproc_result *res)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct probe_trace_event *tev;
@@ -607,7 +685,6 @@ preproc_gen_prologue(struct bpf_program *prog, int n,

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
@@ -715,7 +792,7 @@ static int hook_load_preprocessor(struct bpf_program *prog)
	struct bpf_prog_priv *priv = program_priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;
	int i;

	if (IS_ERR_OR_NULL(priv)) {
		pr_debug("Internal error when hook preprocessor\n");
@@ -753,6 +830,13 @@ static int hook_load_preprocessor(struct bpf_program *prog)
		return -ENOMEM;
	}

	priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
	if (!priv->prologue_fds) {
		pr_debug("Not enough memory: alloc prologue fds failed\n");
		return -ENOMEM;
	}
	memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
@@ -761,13 +845,7 @@ static int hook_load_preprocessor(struct bpf_program *prog)
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
	return map_prologue(pev, priv->type_mapping, &priv->nr_types);
}

int bpf__probe(struct bpf_object *obj)
@@ -874,6 +952,77 @@ int bpf__unprobe(struct bpf_object *obj)
	return ret;
}

static int bpf_object__load_prologue(struct bpf_object *obj)
{
	int init_cnt = ARRAY_SIZE(prologue_init_insn);
	const struct bpf_insn *orig_insns;
	struct bpf_preproc_result res;
	struct perf_probe_event *pev;
	struct bpf_program *prog;
	int orig_insns_cnt;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		int err, i, fd;

		if (IS_ERR_OR_NULL(priv)) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (!priv->need_prologue)
			continue;

		/*
		 * For each program that needs prologue we do following:
		 *
		 * - take its current instructions and use them
		 *   to generate the new code with prologue
		 * - load new instructions with bpf_prog_load
		 *   and keep the fd in prologue_fds
		 * - new fd will be used in bpf__foreach_event
		 *   to connect this program with perf evsel
		 */
		orig_insns = bpf_program__insns(prog);
		orig_insns_cnt = bpf_program__insn_cnt(prog);

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			/*
			 * Skipping artificial prologue_init_insn instructions
			 * (init_cnt), so the prologue can be generated instead
			 * of them.
			 */
			err = preproc_gen_prologue(prog, i,
						   orig_insns + init_cnt,
						   orig_insns_cnt - init_cnt,
						   &res);
			if (err)
				return err;

			fd = bpf_prog_load(bpf_program__get_type(prog),
					   bpf_program__name(prog), "GPL",
					   res.new_insn_ptr,
					   res.new_insn_cnt, NULL);
			if (fd < 0) {
				char bf[128];

				libbpf_strerror(-errno, bf, sizeof(bf));
				pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
					 -errno, bf);
				return -errno;
			}
			priv->prologue_fds[i] = fd;
		}
		/*
		 * We no longer need the original program,
		 * we can unload it.
		 */
		bpf_program__unload(prog);
	}
	return 0;
}

int bpf__load(struct bpf_object *obj)
{
	int err;
@@ -885,7 +1034,7 @@ int bpf__load(struct bpf_object *obj)
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return 0;
	return bpf_object__load_prologue(obj);
}

int bpf__foreach_event(struct bpf_object *obj,
@@ -920,13 +1069,10 @@ int bpf__foreach_event(struct bpf_object *obj,
	for (i = 0; i < pev->ntevs; i++) {
		tev = &pev->tevs[i];

		if (priv->need_prologue) {
			int type = priv->type_mapping[i];

			fd = bpf_program__nth_fd(prog, type);
		} else {
		if (priv->need_prologue)
			fd = priv->prologue_fds[i];
		else
			fd = bpf_program__fd(prog);
		}

		if (fd < 0) {
			pr_debug("bpf: failed to get file descriptor\n");
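The prepended prologue_init_insn block simply zeroes r2 through r5 before the program body runs; each BPF_MOV64_IMM(BPF_REG_n, 0) is a 64-bit `rN = 0` in raw eBPF. A minimal sketch of hand-building and loading such a sequence with the same bpf_prog_load() call used above (the trivial "return 0" body and the KPROBE program type are placeholder assumptions; macros come from the kernel tree's tools/include/linux/filter.h):

	#include <linux/filter.h>
	#include <bpf/bpf.h>

	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_2, 0),	/* r2 = 0 */
		BPF_MOV64_IMM(BPF_REG_3, 0),	/* r3 = 0 */
		BPF_MOV64_IMM(BPF_REG_4, 0),	/* r4 = 0 */
		BPF_MOV64_IMM(BPF_REG_5, 0),	/* r5 = 0 */
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 */
		BPF_EXIT_INSN(),		/* return r0 */
	};
	int fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "sketch", "GPL",
			       insns, sizeof(insns) / sizeof(insns[0]), NULL);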
@@ -41,6 +41,6 @@ test_cpp
/bench
*.ko
*.tmp
xdpxceiver
xskxceiver
xdp_redirect_multi
xdp_synproxy
@@ -82,7 +82,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
	flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
	test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
	xdpxceiver xdp_redirect_multi xdp_synproxy
	xskxceiver xdp_redirect_multi xdp_synproxy

TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read

@@ -230,6 +230,8 @@ $(OUTPUT)/xdping: $(TESTING_HELPERS)
$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
$(OUTPUT)/test_maps: $(TESTING_HELPERS)
$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
$(OUTPUT)/xsk.o: $(BPFOBJ)
$(OUTPUT)/xskxceiver: $(OUTPUT)/xsk.o

BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
@@ -571,6 +573,8 @@ $(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h
$(OUTPUT)/bench_bpf_loop.o: $(OUTPUT)/bpf_loop_bench.skel.h
$(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
$(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h
$(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h
$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
$(OUTPUT)/bench: LDLIBS += -lm
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
@@ -583,7 +587,9 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
	   $(OUTPUT)/bench_bloom_filter_map.o \
	   $(OUTPUT)/bench_bpf_loop.o \
	   $(OUTPUT)/bench_strncmp.o \
	   $(OUTPUT)/bench_bpf_hashmap_full_update.o
	   $(OUTPUT)/bench_bpf_hashmap_full_update.o \
	   $(OUTPUT)/bench_local_storage.o \
	   $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o
	$(call msg,BINARY,,$@)
	$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
@@ -79,6 +79,43 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
	       hits_per_sec, hits_per_prod, drops_per_sec, hits_per_sec + drops_per_sec);
}

void
grace_period_latency_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
{
	int i;

	memset(gp_stat, 0, sizeof(struct basic_stats));

	for (i = 0; i < res_cnt; i++)
		gp_stat->mean += res[i].gp_ns / 1000.0 / (double)res[i].gp_ct / (0.0 + res_cnt);

#define IT_MEAN_DIFF (res[i].gp_ns / 1000.0 / (double)res[i].gp_ct - gp_stat->mean)
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++)
			gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
	}
	gp_stat->stddev = sqrt(gp_stat->stddev);
#undef IT_MEAN_DIFF
}

void
grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
{
	int i;

	memset(gp_stat, 0, sizeof(struct basic_stats));
	for (i = 0; i < res_cnt; i++)
		gp_stat->mean += res[i].stime / (double)res[i].gp_ct / (0.0 + res_cnt);

#define IT_MEAN_DIFF (res[i].stime / (double)res[i].gp_ct - gp_stat->mean)
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++)
			gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
	}
	gp_stat->stddev = sqrt(gp_stat->stddev);
#undef IT_MEAN_DIFF
}

void hits_drops_report_final(struct bench_res res[], int res_cnt)
{
	int i;
@@ -150,6 +187,53 @@ void ops_report_final(struct bench_res res[], int res_cnt)
	printf("latency %8.3lf ns/op\n", 1000.0 / hits_mean * env.producer_cnt);
}

void local_storage_report_progress(int iter, struct bench_res *res,
				   long delta_ns)
{
	double important_hits_per_sec, hits_per_sec;
	double delta_sec = delta_ns / 1000000000.0;

	hits_per_sec = res->hits / 1000000.0 / delta_sec;
	important_hits_per_sec = res->important_hits / 1000000.0 / delta_sec;

	printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);

	printf("hits %8.3lfM/s ", hits_per_sec);
	printf("important_hits %8.3lfM/s\n", important_hits_per_sec);
}

void local_storage_report_final(struct bench_res res[], int res_cnt)
{
	double important_hits_mean = 0.0, important_hits_stddev = 0.0;
	double hits_mean = 0.0, hits_stddev = 0.0;
	int i;

	for (i = 0; i < res_cnt; i++) {
		hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
		important_hits_mean += res[i].important_hits / 1000000.0 / (0.0 + res_cnt);
	}

	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++) {
			hits_stddev += (hits_mean - res[i].hits / 1000000.0) *
				       (hits_mean - res[i].hits / 1000000.0) /
				       (res_cnt - 1.0);
			important_hits_stddev +=
				       (important_hits_mean - res[i].important_hits / 1000000.0) *
				       (important_hits_mean - res[i].important_hits / 1000000.0) /
				       (res_cnt - 1.0);
		}

		hits_stddev = sqrt(hits_stddev);
		important_hits_stddev = sqrt(important_hits_stddev);
	}
	printf("Summary: hits throughput %8.3lf \u00B1 %5.3lf M ops/s, ",
	       hits_mean, hits_stddev);
	printf("hits latency %8.3lf ns/op, ", 1000.0 / hits_mean);
	printf("important_hits throughput %8.3lf \u00B1 %5.3lf M ops/s\n",
	       important_hits_mean, important_hits_stddev);
}

const char *argp_program_version = "benchmark";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
const char argp_program_doc[] =
@@ -188,13 +272,18 @@ static const struct argp_option opts[] = {
extern struct argp bench_ringbufs_argp;
extern struct argp bench_bloom_map_argp;
extern struct argp bench_bpf_loop_argp;
extern struct argp bench_local_storage_argp;
extern struct argp bench_local_storage_rcu_tasks_trace_argp;
extern struct argp bench_strncmp_argp;

static const struct argp_child bench_parsers[] = {
	{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
	{ &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 },
	{ &bench_bpf_loop_argp, 0, "bpf_loop helper benchmark", 0 },
	{ &bench_local_storage_argp, 0, "local_storage benchmark", 0 },
	{ &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 },
	{ &bench_local_storage_rcu_tasks_trace_argp, 0,
	  "local_storage RCU Tasks Trace slowdown benchmark", 0 },
	{},
};

@@ -397,6 +486,10 @@ extern const struct bench bench_bpf_loop;
extern const struct bench bench_strncmp_no_helper;
extern const struct bench bench_strncmp_helper;
extern const struct bench bench_bpf_hashmap_full_update;
extern const struct bench bench_local_storage_cache_seq_get;
extern const struct bench bench_local_storage_cache_interleaved_get;
extern const struct bench bench_local_storage_cache_hashmap_control;
extern const struct bench bench_local_storage_tasks_trace;

static const struct bench *benchs[] = {
	&bench_count_global,
@@ -432,6 +525,10 @@ static const struct bench *benchs[] = {
	&bench_strncmp_no_helper,
	&bench_strncmp_helper,
	&bench_bpf_hashmap_full_update,
	&bench_local_storage_cache_seq_get,
	&bench_local_storage_cache_interleaved_get,
	&bench_local_storage_cache_hashmap_control,
	&bench_local_storage_tasks_trace,
};

static void setup_benchmark()
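The two grace_period_*_basic_stats() helpers compute a plain per-iteration mean and the unbiased sample standard deviation. Writing x_i for the per-iteration value (gp_ns_i/1000 divided by gp_ct_i for latency, or stime_i/gp_ct_i for ticks), the loops above implement exactly:

	\bar{x} = \frac{1}{n} \sum_{i=1}^{n} x_i,
	\qquad
	s = \sqrt{ \frac{1}{n-1} \sum_{i=1}^{n} \left( x_i - \bar{x} \right)^2 }

The 1/(n-1) factor (Bessel's correction) is why the stddev accumulation is guarded by res_cnt > 1.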
@@ -30,10 +30,19 @@ struct env {
	struct cpu_set cons_cpus;
};

struct basic_stats {
	double mean;
	double stddev;
};

struct bench_res {
	long hits;
	long drops;
	long false_hits;
	long important_hits;
	unsigned long gp_ns;
	unsigned long gp_ct;
	unsigned int stime;
};

struct bench {
@@ -61,6 +70,13 @@ void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns);
void false_hits_report_final(struct bench_res res[], int res_cnt);
void ops_report_progress(int iter, struct bench_res *res, long delta_ns);
void ops_report_final(struct bench_res res[], int res_cnt);
void local_storage_report_progress(int iter, struct bench_res *res,
				   long delta_ns);
void local_storage_report_final(struct bench_res res[], int res_cnt);
void grace_period_latency_basic_stats(struct bench_res res[], int res_cnt,
				      struct basic_stats *gp_stat);
void grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt,
				    struct basic_stats *gp_stat);

static inline __u64 get_time_ns(void)
{
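struct bench (whose full field list sits just above this hunk and is not shown here) bundles the callbacks a benchmark registers with the harness. A skeletal registration, as the local_storage benchmark file below does for real, might look like this sketch (all my_* names are placeholders):

	static void my_setup(void)
	{
		/* open/load skeleton, attach progs */
	}

	static void *my_producer(void *input)
	{
		for (;;)
			syscall(__NR_getpgid);	/* trigger the attached prog */
		return NULL;
	}

	static void *my_consumer(void *input) { return NULL; }

	static void my_measure(struct bench_res *res)
	{
		/* snapshot and reset BPF-side counters into res */
	}

	const struct bench bench_my_case = {
		.name = "my-case",
		.setup = my_setup,
		.producer_thread = my_producer,
		.consumer_thread = my_consumer,
		.measure = my_measure,
		.report_progress = local_storage_report_progress,
		.report_final = local_storage_report_final,
	};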
@@ -0,0 +1,287 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <argp.h>
#include <linux/btf.h>

#include "local_storage_bench.skel.h"
#include "bench.h"

#include <test_btf.h>

static struct {
	__u32 nr_maps;
	__u32 hashmap_nr_keys_used;
} args = {
	.nr_maps = 1000,
	.hashmap_nr_keys_used = 1000,
};

enum {
	ARG_NR_MAPS = 6000,
	ARG_HASHMAP_NR_KEYS_USED = 6001,
};

static const struct argp_option opts[] = {
	{ "nr_maps", ARG_NR_MAPS, "NR_MAPS", 0,
	  "Set number of local_storage maps"},
	{ "hashmap_nr_keys_used", ARG_HASHMAP_NR_KEYS_USED, "NR_KEYS",
	  0, "When doing hashmap test, set number of hashmap keys test uses"},
	{},
};

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	long ret;

	switch (key) {
	case ARG_NR_MAPS:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > UINT_MAX) {
			fprintf(stderr, "invalid nr_maps");
			argp_usage(state);
		}
		args.nr_maps = ret;
		break;
	case ARG_HASHMAP_NR_KEYS_USED:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > UINT_MAX) {
			fprintf(stderr, "invalid hashmap_nr_keys_used");
			argp_usage(state);
		}
		args.hashmap_nr_keys_used = ret;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}

	return 0;
}

const struct argp bench_local_storage_argp = {
	.options = opts,
	.parser = parse_arg,
};

/* Keep in sync w/ array of maps in bpf */
#define MAX_NR_MAPS 1000
/* keep in sync w/ same define in bpf */
#define HASHMAP_SZ 4194304

static void validate(void)
{
	if (env.producer_cnt != 1) {
		fprintf(stderr, "benchmark doesn't support multi-producer!\n");
		exit(1);
	}
	if (env.consumer_cnt != 1) {
		fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
		exit(1);
	}

	if (args.nr_maps > MAX_NR_MAPS) {
		fprintf(stderr, "nr_maps must be <= 1000\n");
		exit(1);
	}

	if (args.hashmap_nr_keys_used > HASHMAP_SZ) {
		fprintf(stderr, "hashmap_nr_keys_used must be <= %u\n", HASHMAP_SZ);
		exit(1);
	}
}

static struct {
	struct local_storage_bench *skel;
	void *bpf_obj;
	struct bpf_map *array_of_maps;
} ctx;

static void prepopulate_hashmap(int fd)
{
	int i, key, val;

	/* local_storage gets will have BPF_LOCAL_STORAGE_GET_F_CREATE flag set, so
	 * populate the hashmap for a similar comparison
	 */
	for (i = 0; i < HASHMAP_SZ; i++) {
		key = val = i;
		if (bpf_map_update_elem(fd, &key, &val, 0)) {
			fprintf(stderr, "Error prepopulating hashmap (key %d)\n", key);
			exit(1);
		}
	}
}

static void __setup(struct bpf_program *prog, bool hashmap)
{
	struct bpf_map *inner_map;
	int i, fd, mim_fd, err;

	LIBBPF_OPTS(bpf_map_create_opts, create_opts);

	if (!hashmap)
		create_opts.map_flags = BPF_F_NO_PREALLOC;

	ctx.skel->rodata->num_maps = args.nr_maps;
	ctx.skel->rodata->hashmap_num_keys = args.hashmap_nr_keys_used;
	inner_map = bpf_map__inner_map(ctx.array_of_maps);
	create_opts.btf_key_type_id = bpf_map__btf_key_type_id(inner_map);
	create_opts.btf_value_type_id = bpf_map__btf_value_type_id(inner_map);

	err = local_storage_bench__load(ctx.skel);
	if (err) {
		fprintf(stderr, "Error loading skeleton\n");
		goto err_out;
	}

	create_opts.btf_fd = bpf_object__btf_fd(ctx.skel->obj);

	mim_fd = bpf_map__fd(ctx.array_of_maps);
	if (mim_fd < 0) {
		fprintf(stderr, "Error getting map_in_map fd\n");
		goto err_out;
	}

	for (i = 0; i < args.nr_maps; i++) {
		if (hashmap)
			fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
					    sizeof(int), HASHMAP_SZ, &create_opts);
		else
			fd = bpf_map_create(BPF_MAP_TYPE_TASK_STORAGE, NULL, sizeof(int),
					    sizeof(int), 0, &create_opts);
		if (fd < 0) {
			fprintf(stderr, "Error creating map %d: %d\n", i, fd);
			goto err_out;
		}

		if (hashmap)
			prepopulate_hashmap(fd);

		err = bpf_map_update_elem(mim_fd, &i, &fd, 0);
		if (err) {
			fprintf(stderr, "Error updating array-of-maps w/ map %d\n", i);
			goto err_out;
		}
	}

	if (!bpf_program__attach(prog)) {
		fprintf(stderr, "Error attaching bpf program\n");
		goto err_out;
	}

	return;
err_out:
	exit(1);
}

static void hashmap_setup(void)
{
	struct local_storage_bench *skel;

	setup_libbpf();

	skel = local_storage_bench__open();
	ctx.skel = skel;
	ctx.array_of_maps = skel->maps.array_of_hash_maps;
	skel->rodata->use_hashmap = 1;
	skel->rodata->interleave = 0;

	__setup(skel->progs.get_local, true);
}

static void local_storage_cache_get_setup(void)
{
	struct local_storage_bench *skel;

	setup_libbpf();

	skel = local_storage_bench__open();
	ctx.skel = skel;
	ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
	skel->rodata->use_hashmap = 0;
	skel->rodata->interleave = 0;

	__setup(skel->progs.get_local, false);
}

static void local_storage_cache_get_interleaved_setup(void)
{
	struct local_storage_bench *skel;

	setup_libbpf();

	skel = local_storage_bench__open();
	ctx.skel = skel;
	ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
	skel->rodata->use_hashmap = 0;
	skel->rodata->interleave = 1;

	__setup(skel->progs.get_local, false);
}

static void measure(struct bench_res *res)
{
	res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
	res->important_hits = atomic_swap(&ctx.skel->bss->important_hits, 0);
}

static inline void trigger_bpf_program(void)
{
	syscall(__NR_getpgid);
}

static void *consumer(void *input)
{
	return NULL;
}

static void *producer(void *input)
{
	while (true)
		trigger_bpf_program();

	return NULL;
}

/* cache sequential and interleaved get benchs test local_storage get
 * performance, specifically they demonstrate performance cliff of
 * current list-plus-cache local_storage model.
 *
 * cache sequential get: call bpf_task_storage_get on n maps in order
 * cache interleaved get: like "sequential get", but interleave 4 calls to the
 *	'important' map (idx 0 in array_of_maps) for every 10 calls. Goal
 *	is to mimic environment where many progs are accessing their local_storage
 *	maps, with 'our' prog needing to access its map more often than others
 */
const struct bench bench_local_storage_cache_seq_get = {
	.name = "local-storage-cache-seq-get",
	.validate = validate,
	.setup = local_storage_cache_get_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = local_storage_report_progress,
	.report_final = local_storage_report_final,
};

const struct bench bench_local_storage_cache_interleaved_get = {
	.name = "local-storage-cache-int-get",
	.validate = validate,
	.setup = local_storage_cache_get_interleaved_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = local_storage_report_progress,
	.report_final = local_storage_report_final,
};

const struct bench bench_local_storage_cache_hashmap_control = {
	.name = "local-storage-cache-hashmap-control",
	.validate = validate,
	.setup = hashmap_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = local_storage_report_progress,
	.report_final = local_storage_report_final,
};
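measure() drains the BPF-side counters with atomic_swap(), so each iteration reports only the hits accumulated since the previous snapshot. The helper's definition is in bench.h rather than this patch, so treat the following as an assumption consistent with its use here, roughly a gcc-builtin exchange:

	/* sketch of the assumed bench.h helper */
	static inline long atomic_swap(long *value, long n)
	{
		/* atomically store n and return the previous value */
		return __atomic_exchange_n(value, n, __ATOMIC_RELAXED);
	}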
@@ -0,0 +1,281 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <argp.h>

#include <sys/prctl.h>
#include "local_storage_rcu_tasks_trace_bench.skel.h"
#include "bench.h"

#include <signal.h>

static struct {
	__u32 nr_procs;
	__u32 kthread_pid;
	bool quiet;
} args = {
	.nr_procs = 1000,
	.kthread_pid = 0,
	.quiet = false,
};

enum {
	ARG_NR_PROCS = 7000,
	ARG_KTHREAD_PID = 7001,
	ARG_QUIET = 7002,
};

static const struct argp_option opts[] = {
	{ "nr_procs", ARG_NR_PROCS, "NR_PROCS", 0,
	  "Set number of user processes to spin up"},
	{ "kthread_pid", ARG_KTHREAD_PID, "PID", 0,
	  "Pid of rcu_tasks_trace kthread for ticks tracking"},
	{ "quiet", ARG_QUIET, "{0,1}", 0,
	  "If true, don't report progress"},
	{},
};

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	long ret;

	switch (key) {
	case ARG_NR_PROCS:
		ret = strtol(arg, NULL, 10);
		if (ret < 1 || ret > UINT_MAX) {
			fprintf(stderr, "invalid nr_procs\n");
			argp_usage(state);
		}
		args.nr_procs = ret;
		break;
	case ARG_KTHREAD_PID:
		ret = strtol(arg, NULL, 10);
		if (ret < 1) {
			fprintf(stderr, "invalid kthread_pid\n");
			argp_usage(state);
		}
		args.kthread_pid = ret;
		break;
	case ARG_QUIET:
		ret = strtol(arg, NULL, 10);
		if (ret < 0 || ret > 1) {
			fprintf(stderr, "invalid quiet %ld\n", ret);
			argp_usage(state);
		}
		args.quiet = ret;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}

	return 0;
}

const struct argp bench_local_storage_rcu_tasks_trace_argp = {
	.options = opts,
	.parser = parse_arg,
};

#define MAX_SLEEP_PROCS 150000

static void validate(void)
{
	if (env.producer_cnt != 1) {
		fprintf(stderr, "benchmark doesn't support multi-producer!\n");
		exit(1);
	}
	if (env.consumer_cnt != 1) {
		fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
		exit(1);
	}

	if (args.nr_procs > MAX_SLEEP_PROCS) {
		fprintf(stderr, "benchmark supports up to %u sleeper procs!\n",
			MAX_SLEEP_PROCS);
		exit(1);
	}
}

static long kthread_pid_ticks(void)
{
	char procfs_path[100];
	long stime;
	FILE *f;

	if (!args.kthread_pid)
		return -1;

	sprintf(procfs_path, "/proc/%u/stat", args.kthread_pid);
	f = fopen(procfs_path, "r");
	if (!f) {
		fprintf(stderr, "couldn't open %s, exiting\n", procfs_path);
		goto err_out;
	}
	if (fscanf(f, "%*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %ld", &stime) != 1) {
		fprintf(stderr, "fscanf of %s failed, exiting\n", procfs_path);
		goto err_out;
	}
	fclose(f);
	return stime;

err_out:
	if (f)
		fclose(f);
	exit(1);
	return 0;
}

static struct {
	struct local_storage_rcu_tasks_trace_bench *skel;
	long prev_kthread_stime;
} ctx;

static void sleep_and_loop(void)
{
	while (true) {
		sleep(rand() % 4);
		syscall(__NR_getpgid);
	}
}

static void local_storage_tasks_trace_setup(void)
{
	int i, err, forkret, runner_pid;

	runner_pid = getpid();

	for (i = 0; i < args.nr_procs; i++) {
		forkret = fork();
		if (forkret < 0) {
			fprintf(stderr, "Error forking sleeper proc %u of %u, exiting\n", i,
				args.nr_procs);
			goto err_out;
		}

		if (!forkret) {
			err = prctl(PR_SET_PDEATHSIG, SIGKILL);
			if (err < 0) {
				fprintf(stderr, "prctl failed with err %d, exiting\n", errno);
				goto err_out;
			}

			if (getppid() != runner_pid) {
				fprintf(stderr, "Runner died while spinning up procs, exiting\n");
				goto err_out;
			}
			sleep_and_loop();
		}
	}
	printf("Spun up %u procs (our pid %d)\n", args.nr_procs, runner_pid);

	setup_libbpf();

	ctx.skel = local_storage_rcu_tasks_trace_bench__open_and_load();
	if (!ctx.skel) {
		fprintf(stderr, "Error doing open_and_load, exiting\n");
		goto err_out;
	}

	ctx.prev_kthread_stime = kthread_pid_ticks();

	if (!bpf_program__attach(ctx.skel->progs.get_local)) {
		fprintf(stderr, "Error attaching bpf program\n");
		goto err_out;
	}

	if (!bpf_program__attach(ctx.skel->progs.pregp_step)) {
		fprintf(stderr, "Error attaching bpf program\n");
		goto err_out;
	}

	if (!bpf_program__attach(ctx.skel->progs.postgp)) {
		fprintf(stderr, "Error attaching bpf program\n");
		goto err_out;
	}

	return;
err_out:
	exit(1);
}

static void measure(struct bench_res *res)
{
	long ticks;

	res->gp_ct = atomic_swap(&ctx.skel->bss->gp_hits, 0);
	res->gp_ns = atomic_swap(&ctx.skel->bss->gp_times, 0);
	ticks = kthread_pid_ticks();
	res->stime = ticks - ctx.prev_kthread_stime;
	ctx.prev_kthread_stime = ticks;
}

static void *consumer(void *input)
{
	return NULL;
}

static void *producer(void *input)
{
	while (true)
		syscall(__NR_getpgid);
	return NULL;
}

static void report_progress(int iter, struct bench_res *res, long delta_ns)
{
	if (ctx.skel->bss->unexpected) {
		fprintf(stderr, "Error: Unexpected order of bpf prog calls (postgp after pregp).");
		fprintf(stderr, "Data can't be trusted, exiting\n");
		exit(1);
	}

	if (args.quiet)
		return;

	printf("Iter %d\t avg tasks_trace grace period latency\t%lf ns\n",
	       iter, res->gp_ns / (double)res->gp_ct);
	printf("Iter %d\t avg ticks per tasks_trace grace period\t%lf\n",
	       iter, res->stime / (double)res->gp_ct);
}

static void report_final(struct bench_res res[], int res_cnt)
{
	struct basic_stats gp_stat;

	grace_period_latency_basic_stats(res, res_cnt, &gp_stat);
	printf("SUMMARY tasks_trace grace period latency");
	printf("\tavg %.3lf us\tstddev %.3lf us\n", gp_stat.mean, gp_stat.stddev);
	grace_period_ticks_basic_stats(res, res_cnt, &gp_stat);
	printf("SUMMARY ticks per tasks_trace grace period");
	printf("\tavg %.3lf\tstddev %.3lf\n", gp_stat.mean, gp_stat.stddev);
}

/* local-storage-tasks-trace: Benchmark performance of BPF local_storage's use
 * of RCU Tasks-Trace.
 *
 * Stress RCU Tasks Trace by forking many tasks, all of which do no work aside
 * from sleep() loop, and creating/destroying BPF task-local storage on wakeup.
 * The number of forked tasks is configurable.
 *
 * Exercising code paths which call call_rcu_tasks_trace while there are many
 * thousands of tasks on the system should result in RCU Tasks-Trace having to
 * do a noticeable amount of work.
 *
 * This should be observable by measuring rcu_tasks_trace_kthread CPU usage
 * after the grace period has ended, or by measuring grace period latency.
 *
 * This benchmark uses both approaches, attaching to rcu_tasks_trace_pregp_step
 * and rcu_tasks_trace_postgp functions to measure grace period latency and
 * using /proc/PID/stat to measure rcu_tasks_trace_kthread kernel ticks
 */
const struct bench bench_local_storage_tasks_trace = {
	.name = "local-storage-tasks-trace",
	.validate = validate,
	.setup = local_storage_tasks_trace_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = report_progress,
	.report_final = report_final,
};
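kthread_pid_ticks() relies on the 15th whitespace-separated field of /proc/PID/stat being the kernel-mode time (stime) in clock ticks; the fourteen %*s conversions in the fscanf above skip everything before it. Ticks convert to seconds via the runtime tick rate, along these lines (a small helper sketch, not part of the patch):

	#include <unistd.h>

	/* stime delta in ticks -> seconds; tick rate queried at runtime */
	double ticks_to_sec(long ticks)
	{
		return (double)ticks / sysconf(_SC_CLK_TCK);
	}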
@@ -0,0 +1,24 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

source ./benchs/run_common.sh

set -eufo pipefail

header "Hashmap Control"
for i in 10 1000 10000 100000 4194304; do
subtitle "num keys: $i"
	summarize_local_storage "hashmap (control) sequential    get: "\
		"$(./bench --nr_maps 1 --hashmap_nr_keys_used=$i local-storage-cache-hashmap-control)"
	printf "\n"
done

header "Local Storage"
for i in 1 10 16 17 24 32 100 1000; do
subtitle "num_maps: $i"
	summarize_local_storage "local_storage cache sequential  get: "\
		"$(./bench --nr_maps $i local-storage-cache-seq-get)"
	summarize_local_storage "local_storage cache interleaved get: "\
		"$(./bench --nr_maps $i local-storage-cache-int-get)"
	printf "\n"
done
@@ -0,0 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

kthread_pid=`pgrep rcu_tasks_trace_kthread`

if [ -z "$kthread_pid" ]; then
	echo "error: Couldn't find rcu_tasks_trace_kthread"
	exit 1
fi

./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet 1 local-storage-tasks-trace
@@ -41,6 +41,16 @@ function ops()
	echo "$*" | sed -E "s/.*latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/"
}

function local_storage()
{
	echo -n "hits throughput: "
	echo -n "$*" | sed -E "s/.* hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
	echo -n -e ", hits latency: "
	echo -n "$*" | sed -E "s/.* hits latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/"
	echo -n ", important_hits throughput: "
	echo "$*" | sed -E "s/.*important_hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
}

function total()
{
	echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
@@ -67,6 +77,13 @@ function summarize_ops()
	printf "%-20s %s\n" "$bench" "$(ops $summary)"
}

function summarize_local_storage()
{
	bench="$1"
	summary=$(echo $2 | tail -n1)
	printf "%-20s %s\n" "$bench" "$(local_storage $summary)"
}

function summarize_total()
{
	bench="$1"
@@ -2,15 +2,6 @@
#ifndef __BPF_LEGACY__
#define __BPF_LEGACY__

#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)	\
	struct ____btf_map_##name {			\
		type_key key;				\
		type_val value;				\
	};						\
	struct ____btf_map_##name			\
	__attribute__ ((section(".maps." #name), used))	\
		____btf_map_##name = { }

/* llvm builtin functions that eBPF C program may use to
 * emit BPF_LD_ABS and BPF_LD_IND instructions
 */
@@ -57,3 +57,9 @@ CONFIG_FPROBE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_MPTCP=y
CONFIG_NETFILTER_SYNPROXY=y
CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_SYNPROXY=y
CONFIG_IP_NF_RAW=y
@@ -436,7 +436,7 @@ struct nstoken *open_netns(const char *name)
	int err;
	struct nstoken *token;

	token = malloc(sizeof(struct nstoken));
	token = calloc(1, sizeof(struct nstoken));
	if (!ASSERT_OK_PTR(token, "malloc token"))
		return NULL;

@@ -120,6 +120,64 @@ static void check_nested_calls(struct bpf_loop *skel)
	bpf_link__destroy(link);
}

static void check_non_constant_callback(struct bpf_loop *skel)
{
	struct bpf_link *link =
		bpf_program__attach(skel->progs.prog_non_constant_callback);

	if (!ASSERT_OK_PTR(link, "link"))
		return;

	skel->bss->callback_selector = 0x0F;
	usleep(1);
	ASSERT_EQ(skel->bss->g_output, 0x0F, "g_output #1");

	skel->bss->callback_selector = 0xF0;
	usleep(1);
	ASSERT_EQ(skel->bss->g_output, 0xF0, "g_output #2");

	bpf_link__destroy(link);
}

static void check_stack(struct bpf_loop *skel)
{
	struct bpf_link *link = bpf_program__attach(skel->progs.stack_check);
	const int max_key = 12;
	int key;
	int map_fd;

	if (!ASSERT_OK_PTR(link, "link"))
		return;

	map_fd = bpf_map__fd(skel->maps.map1);

	if (!ASSERT_GE(map_fd, 0, "bpf_map__fd"))
		goto out;

	for (key = 1; key <= max_key; ++key) {
		int val = key;
		int err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);

		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			goto out;
	}

	usleep(1);

	for (key = 1; key <= max_key; ++key) {
		int val;
		int err = bpf_map_lookup_elem(map_fd, &key, &val);

		if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
			goto out;
		if (!ASSERT_EQ(val, key + 1, "bad value in the map"))
			goto out;
	}

out:
	bpf_link__destroy(link);
}

void test_bpf_loop(void)
{
	struct bpf_loop *skel;
@@ -140,6 +198,10 @@ void test_bpf_loop(void)
		check_invalid_flags(skel);
	if (test__start_subtest("check_nested_calls"))
		check_nested_calls(skel);
	if (test__start_subtest("check_non_constant_callback"))
		check_non_constant_callback(skel);
	if (test__start_subtest("check_stack"))
		check_stack(skel);

	bpf_loop__destroy(skel);
}
@ -9,6 +9,9 @@
|
|||
#include "bpf_cubic.skel.h"
|
||||
#include "bpf_tcp_nogpl.skel.h"
|
||||
#include "bpf_dctcp_release.skel.h"
|
||||
#include "tcp_ca_write_sk_pacing.skel.h"
|
||||
#include "tcp_ca_incompl_cong_ops.skel.h"
|
||||
#include "tcp_ca_unsupp_cong_op.skel.h"
|
||||
|
||||
#ifndef ENOTSUPP
|
||||
#define ENOTSUPP 524
|
||||
|
@@ -322,6 +325,58 @@ static void test_rel_setsockopt(void)
 	bpf_dctcp_release__destroy(rel_skel);
 }

+static void test_write_sk_pacing(void)
+{
+	struct tcp_ca_write_sk_pacing *skel;
+	struct bpf_link *link;
+
+	skel = tcp_ca_write_sk_pacing__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		return;
+
+	link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
+	ASSERT_OK_PTR(link, "attach_struct_ops");
+
+	bpf_link__destroy(link);
+	tcp_ca_write_sk_pacing__destroy(skel);
+}
+
+static void test_incompl_cong_ops(void)
+{
+	struct tcp_ca_incompl_cong_ops *skel;
+	struct bpf_link *link;
+
+	skel = tcp_ca_incompl_cong_ops__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		return;
+
+	/* That cong_avoid() and cong_control() are missing is only reported at
+	 * this point:
+	 */
+	link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);
+	ASSERT_ERR_PTR(link, "attach_struct_ops");
+
+	bpf_link__destroy(link);
+	tcp_ca_incompl_cong_ops__destroy(skel);
+}
+
+static void test_unsupp_cong_op(void)
+{
+	libbpf_print_fn_t old_print_fn;
+	struct tcp_ca_unsupp_cong_op *skel;
+
+	err_str = "attach to unsupported member get_info";
+	found = false;
+	old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+	skel = tcp_ca_unsupp_cong_op__open_and_load();
+	ASSERT_NULL(skel, "open_and_load");
+	ASSERT_EQ(found, true, "expected_err_msg");
+
+	tcp_ca_unsupp_cong_op__destroy(skel);
+	libbpf_set_print(old_print_fn);
+}
+
 void test_bpf_tcp_ca(void)
 {
 	if (test__start_subtest("dctcp"))
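test_incompl_cong_ops exercises the fact that a BPF TCP congestion control is registered through a struct_ops map, and that completeness of the ops table is only validated when the map is attached, not when the object is loaded. A minimal sketch of the BPF side of such a CC (names illustrative, callback bodies elided):

	SEC("struct_ops/sample_init")
	void BPF_PROG(sample_init, struct sock *sk)
	{
		/* e.g. initialize private CC state */
	}

	SEC(".struct_ops")
	struct tcp_congestion_ops sample_ops = {
		.init = (void *)sample_init,
		/* leaving mandatory ops such as .cong_avoid/.cong_control
		 * unset survives open_and_load() but makes
		 * bpf_map__attach_struct_ops() fail, as the test expects
		 */
		.name = "bpf_sample",
	};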
@@ -334,4 +389,10 @@ void test_bpf_tcp_ca(void)
 		test_dctcp_fallback();
 	if (test__start_subtest("rel_setsockopt"))
 		test_rel_setsockopt();
+	if (test__start_subtest("write_sk_pacing"))
+		test_write_sk_pacing();
+	if (test__start_subtest("incompl_cong_ops"))
+		test_incompl_cong_ops();
+	if (test__start_subtest("unsupp_cong_op"))
+		test_unsupp_cong_op();
 }
@@ -34,7 +34,6 @@ static bool always_log;
 #undef CHECK
 #define CHECK(condition, format...) _CHECK(condition, "check", duration, format)

 #define BTF_END_RAW 0xdeadbeef
 #define NAME_TBD 0xdeadb33f

 #define NAME_NTH(N) (0xfffe0000 | N)
@@ -4652,7 +4651,6 @@ struct btf_file_test {
 };

 static struct btf_file_test file_tests[] = {
 	{ .file = "test_btf_haskv.o", },
 	{ .file = "test_btf_newkv.o", },
 	{ .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
 };
@@ -543,7 +543,6 @@ static int __trigger_module_test_read(const struct core_reloc_test_case *test)
 	return 0;
 }

-
 static const struct core_reloc_test_case test_cases[] = {
 	/* validate we can find kernel image and use its BTF for relocs */
 	{
@@ -556,6 +555,7 @@ static const struct core_reloc_test_case test_cases[] = {
 			.valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
 			.comm = "test_progs",
 			.comm_len = sizeof("test_progs"),
+			.local_task_struct_matches = true,
 		},
 		.output_len = sizeof(struct core_reloc_kernel_output),
 		.raw_tp_name = "sys_enter",
@@ -752,9 +752,10 @@ static const struct core_reloc_test_case test_cases[] = {
 	SIZE_CASE(size___diff_offs),
 	SIZE_ERR_CASE(size___err_ambiguous),

-	/* validate type existence and size relocations */
+	/* validate type existence, match, and size relocations */
 	TYPE_BASED_CASE(type_based, {
 		.struct_exists = 1,
+		.complex_struct_exists = 1,
 		.union_exists = 1,
 		.enum_exists = 1,
 		.typedef_named_struct_exists = 1,
@@ -763,8 +764,24 @@ static const struct core_reloc_test_case test_cases[] = {
 		.typedef_int_exists = 1,
 		.typedef_enum_exists = 1,
 		.typedef_void_ptr_exists = 1,
+		.typedef_restrict_ptr_exists = 1,
 		.typedef_func_proto_exists = 1,
 		.typedef_arr_exists = 1,
+
+		.struct_matches = 1,
+		.complex_struct_matches = 1,
+		.union_matches = 1,
+		.enum_matches = 1,
+		.typedef_named_struct_matches = 1,
+		.typedef_anon_struct_matches = 1,
+		.typedef_struct_ptr_matches = 1,
+		.typedef_int_matches = 1,
+		.typedef_enum_matches = 1,
+		.typedef_void_ptr_matches = 1,
+		.typedef_restrict_ptr_matches = 1,
+		.typedef_func_proto_matches = 1,
+		.typedef_arr_matches = 1,
+
 		.struct_sz = sizeof(struct a_struct),
 		.union_sz = sizeof(union a_union),
 		.enum_sz = sizeof(enum an_enum),
@@ -780,6 +797,45 @@ static const struct core_reloc_test_case test_cases[] = {
 	TYPE_BASED_CASE(type_based___all_missing, {
 		/* all zeros */
 	}),
+	TYPE_BASED_CASE(type_based___diff, {
+		.struct_exists = 1,
+		.complex_struct_exists = 1,
+		.union_exists = 1,
+		.enum_exists = 1,
+		.typedef_named_struct_exists = 1,
+		.typedef_anon_struct_exists = 1,
+		.typedef_struct_ptr_exists = 1,
+		.typedef_int_exists = 1,
+		.typedef_enum_exists = 1,
+		.typedef_void_ptr_exists = 1,
+		.typedef_func_proto_exists = 1,
+		.typedef_arr_exists = 1,
+
+		.struct_matches = 1,
+		.complex_struct_matches = 1,
+		.union_matches = 1,
+		.enum_matches = 1,
+		.typedef_named_struct_matches = 1,
+		.typedef_anon_struct_matches = 1,
+		.typedef_struct_ptr_matches = 1,
+		.typedef_int_matches = 0,
+		.typedef_enum_matches = 1,
+		.typedef_void_ptr_matches = 1,
+		.typedef_func_proto_matches = 0,
+		.typedef_arr_matches = 0,
+
+		.struct_sz = sizeof(struct a_struct___diff),
+		.union_sz = sizeof(union a_union___diff),
+		.enum_sz = sizeof(enum an_enum___diff),
+		.typedef_named_struct_sz = sizeof(named_struct_typedef___diff),
+		.typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff),
+		.typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff),
+		.typedef_int_sz = sizeof(int_typedef___diff),
+		.typedef_enum_sz = sizeof(enum_typedef___diff),
+		.typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff),
+		.typedef_func_proto_sz = sizeof(func_proto_typedef___diff),
+		.typedef_arr_sz = sizeof(arr_typedef___diff),
+	}),
 	TYPE_BASED_CASE(type_based___diff_sz, {
 		.struct_exists = 1,
 		.union_exists = 1,
@@ -792,6 +848,19 @@ static const struct core_reloc_test_case test_cases[] = {
 		.typedef_void_ptr_exists = 1,
 		.typedef_func_proto_exists = 1,
 		.typedef_arr_exists = 1,
+
+		.struct_matches = 0,
+		.union_matches = 0,
+		.enum_matches = 0,
+		.typedef_named_struct_matches = 0,
+		.typedef_anon_struct_matches = 0,
+		.typedef_struct_ptr_matches = 1,
+		.typedef_int_matches = 0,
+		.typedef_enum_matches = 0,
+		.typedef_void_ptr_matches = 1,
+		.typedef_func_proto_matches = 0,
+		.typedef_arr_matches = 0,
+
 		.struct_sz = sizeof(struct a_struct___diff_sz),
 		.union_sz = sizeof(union a_union___diff_sz),
 		.enum_sz = sizeof(enum an_enum___diff_sz),
@@ -806,10 +875,12 @@ static const struct core_reloc_test_case test_cases[] = {
 	}),
 	TYPE_BASED_CASE(type_based___incompat, {
 		.enum_exists = 1,
+		.enum_matches = 1,
 		.enum_sz = sizeof(enum an_enum),
 	}),
 	TYPE_BASED_CASE(type_based___fn_wrong_args, {
 		.struct_exists = 1,
+		.struct_matches = 1,
 		.struct_sz = sizeof(struct a_struct),
 	}),
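The new *_matches fields cover the type-match relocation introduced by this series, next to the existing existence and size checks. Each field corresponds to one CO-RE query the BPF program issues against the target kernel's BTF; roughly, using the bpf_core_* macros from bpf_core_read.h:

	/* local definition that CO-RE compares against the target BTF */
	struct a_struct { int x; };

	/* 1 if a type with this name exists in the target kernel */
	exists = bpf_core_type_exists(struct a_struct);
	/* 1 if the target's definition is also compatible shape-wise,
	 * which is the new bpf_core_type_matches() query
	 */
	matches = bpf_core_type_matches(struct a_struct);
	/* byte size of the target kernel's version of the type */
	sz = bpf_core_type_size(struct a_struct);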
@@ -329,7 +329,7 @@ static int get_syms(char ***symsp, size_t *cntp)
 	struct hashmap *map;
 	char buf[256];
 	FILE *f;
-	int err;
+	int err = 0;

 	/*
 	 * The available_filter_functions contains many duplicates,
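The initializer silences (and, on paths where the function can return before the first assignment, corrects) a may-be-used-uninitialized pattern: err is only assigned inside a loop or branch yet read unconditionally afterwards. A condensed, hypothetical version of the shape the compiler warns about:

	int get_something(void)
	{
		int err = 0;	/* without "= 0", gcc may warn that err is
				 * used uninitialized on the path below */

		while (have_more_items()) {	/* hypothetical helpers */
			err = process_item();
			if (err)
				break;
		}
		return err;	/* zero iterations -> now a well-defined 0 */
	}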
@@ -407,7 +407,7 @@ static void test_bench_attach(void)
 	double attach_delta, detach_delta;
 	struct bpf_link *link = NULL;
 	char **syms = NULL;
-	size_t cnt, i;
+	size_t cnt = 0, i;

 	if (!ASSERT_OK(get_syms(&syms, &cnt), "get_syms"))
 		return;
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "lsm_cgroup.skel.h"
+#include "lsm_cgroup_nonvoid.skel.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
+static struct btf *btf;
+
+static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
+{
+	LIBBPF_OPTS(bpf_prog_query_opts, p);
+	int cnt = 0;
+	int i;
+
+	ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
+
+	if (!attach_func)
+		return p.prog_cnt;
+
+	/* When attach_func is provided, count the number of progs that
+	 * attach to the given symbol.
+	 */
+
+	if (!btf)
+		btf = btf__load_vmlinux_btf();
+	if (!ASSERT_OK(libbpf_get_error(btf), "btf_vmlinux"))
+		return -1;
+
+	p.prog_ids = malloc(sizeof(u32) * p.prog_cnt);
+	p.prog_attach_flags = malloc(sizeof(u32) * p.prog_cnt);
+	ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
+
+	for (i = 0; i < p.prog_cnt; i++) {
+		struct bpf_prog_info info = {};
+		__u32 info_len = sizeof(info);
+		int fd;
+
+		fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
+		ASSERT_GE(fd, 0, "prog_get_fd_by_id");
+		ASSERT_OK(bpf_obj_get_info_by_fd(fd, &info, &info_len), "prog_info_by_fd");
+		close(fd);
+
+		if (info.attach_btf_id ==
+		    btf__find_by_name_kind(btf, attach_func, BTF_KIND_FUNC))
+			cnt++;
+	}
+
+	free(p.prog_ids);
+	free(p.prog_attach_flags);
+
+	return cnt;
+}
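query_prog_cnt() shows the usual two-step shape of the prog-query API for the new BPF_LSM_CGROUP attach type: query once with no output buffers to learn prog_cnt, size the arrays, then query again to fetch the IDs. In isolation:

	LIBBPF_OPTS(bpf_prog_query_opts, p);

	/* first call: no buffers attached, only p.prog_cnt is filled in */
	bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p);

	/* second call: buffers sized from the first answer */
	p.prog_ids = calloc(p.prog_cnt, sizeof(__u32));
	p.prog_attach_flags = calloc(p.prog_cnt, sizeof(__u32));
	bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p);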
+
+static void test_lsm_cgroup_functional(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
+	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+	int cgroup_fd = -1, cgroup_fd2 = -1, cgroup_fd3 = -1;
+	int listen_fd, client_fd, accepted_fd;
+	struct lsm_cgroup *skel = NULL;
+	int post_create_prog_fd2 = -1;
+	int post_create_prog_fd = -1;
+	int bind_link_fd2 = -1;
+	int bind_prog_fd2 = -1;
+	int alloc_prog_fd = -1;
+	int bind_prog_fd = -1;
+	int bind_link_fd = -1;
+	int clone_prog_fd = -1;
+	int err, fd, prio;
+	socklen_t socklen;
+
+	cgroup_fd3 = test__join_cgroup("/sock_policy_empty");
+	if (!ASSERT_GE(cgroup_fd3, 0, "create empty cgroup"))
+		goto close_cgroup;
+
+	cgroup_fd2 = test__join_cgroup("/sock_policy_reuse");
+	if (!ASSERT_GE(cgroup_fd2, 0, "create cgroup for reuse"))
+		goto close_cgroup;
+
+	cgroup_fd = test__join_cgroup("/sock_policy");
+	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+		goto close_cgroup;
+
+	skel = lsm_cgroup__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		goto close_cgroup;
+
+	post_create_prog_fd = bpf_program__fd(skel->progs.socket_post_create);
+	post_create_prog_fd2 = bpf_program__fd(skel->progs.socket_post_create2);
+	bind_prog_fd = bpf_program__fd(skel->progs.socket_bind);
+	bind_prog_fd2 = bpf_program__fd(skel->progs.socket_bind2);
+	alloc_prog_fd = bpf_program__fd(skel->progs.socket_alloc);
+	clone_prog_fd = bpf_program__fd(skel->progs.socket_clone);
+
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 0, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 0, "total prog count");
+	err = bpf_prog_attach(alloc_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
+	if (err == -ENOTSUPP) {
+		test__skip();
+		goto close_cgroup;
+	}
+	if (!ASSERT_OK(err, "attach alloc_prog_fd"))
+		goto detach_cgroup;
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 1, "total prog count");
+
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 0, "prog count");
+	err = bpf_prog_attach(clone_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
+	if (!ASSERT_OK(err, "attach clone_prog_fd"))
+		goto detach_cgroup;
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 2, "total prog count");
+
+	/* Make sure replacing works. */
+
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 0, "prog count");
+	err = bpf_prog_attach(post_create_prog_fd, cgroup_fd,
+			      BPF_LSM_CGROUP, 0);
+	if (!ASSERT_OK(err, "attach post_create_prog_fd"))
+		goto detach_cgroup;
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
+
+	attach_opts.replace_prog_fd = post_create_prog_fd;
+	err = bpf_prog_attach_opts(post_create_prog_fd2, cgroup_fd,
+				   BPF_LSM_CGROUP, &attach_opts);
+	if (!ASSERT_OK(err, "prog replace post_create_prog_fd"))
+		goto detach_cgroup;
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
+
+	/* Try the same attach/replace via link API. */
+
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 0, "prog count");
+	bind_link_fd = bpf_link_create(bind_prog_fd, cgroup_fd,
+				       BPF_LSM_CGROUP, NULL);
+	if (!ASSERT_GE(bind_link_fd, 0, "link create bind_prog_fd"))
+		goto detach_cgroup;
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+
+	update_opts.old_prog_fd = bind_prog_fd;
+	update_opts.flags = BPF_F_REPLACE;
+
+	err = bpf_link_update(bind_link_fd, bind_prog_fd2, &update_opts);
+	if (!ASSERT_OK(err, "link update bind_prog_fd"))
+		goto detach_cgroup;
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+
+	/* Attach another instance of bind program to another cgroup.
+	 * This should trigger the reuse of the trampoline shim (two
+	 * programs attaching to the same btf_id).
+	 */
+
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 0, "prog count");
+	bind_link_fd2 = bpf_link_create(bind_prog_fd2, cgroup_fd2,
+					BPF_LSM_CGROUP, NULL);
+	if (!ASSERT_GE(bind_link_fd2, 0, "link create bind_prog_fd2"))
+		goto detach_cgroup;
+	ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 1, "prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+	ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");
+
+	/* AF_UNIX is prohibited. */
+
+	fd = socket(AF_UNIX, SOCK_STREAM, 0);
+	ASSERT_LT(fd, 0, "socket(AF_UNIX)");
+	close(fd);
+
+	/* AF_INET6 gets default policy (sk_priority). */
+
+	fd = socket(AF_INET6, SOCK_STREAM, 0);
+	if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
+		goto detach_cgroup;
+
+	prio = 0;
+	socklen = sizeof(prio);
+	ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+		  "getsockopt");
+	ASSERT_EQ(prio, 123, "sk_priority");
+
+	close(fd);
+
+	/* TX-only AF_PACKET is allowed. */
+
+	ASSERT_LT(socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)), 0,
+		  "socket(AF_PACKET, ..., ETH_P_ALL)");
+
+	fd = socket(AF_PACKET, SOCK_RAW, 0);
+	ASSERT_GE(fd, 0, "socket(AF_PACKET, ..., 0)");
+
+	/* TX-only AF_PACKET can not be rebound. */
+
+	struct sockaddr_ll sa = {
+		.sll_family = AF_PACKET,
+		.sll_protocol = htons(ETH_P_ALL),
+	};
+	ASSERT_LT(bind(fd, (struct sockaddr *)&sa, sizeof(sa)), 0,
+		  "bind(ETH_P_ALL)");
+
+	close(fd);
+
+	/* Trigger passive open. */
+
+	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+	ASSERT_GE(listen_fd, 0, "start_server");
+	client_fd = connect_to_fd(listen_fd, 0);
+	ASSERT_GE(client_fd, 0, "connect_to_fd");
+	accepted_fd = accept(listen_fd, NULL, NULL);
+	ASSERT_GE(accepted_fd, 0, "accept");
+
+	prio = 0;
+	socklen = sizeof(prio);
+	ASSERT_GE(getsockopt(accepted_fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+		  "getsockopt");
+	ASSERT_EQ(prio, 234, "sk_priority");
+
+	/* These are replaced and never called. */
+	ASSERT_EQ(skel->bss->called_socket_post_create, 0, "called_create");
+	ASSERT_EQ(skel->bss->called_socket_bind, 0, "called_bind");
+
+	/* AF_INET6+SOCK_STREAM
+	 * AF_PACKET+SOCK_RAW
+	 * listen_fd
+	 * client_fd
+	 * accepted_fd
+	 */
+	ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
+
+	/* start_server
+	 * bind(ETH_P_ALL)
+	 */
+	ASSERT_EQ(skel->bss->called_socket_bind2, 2, "called_bind2");
+	/* Single accept(). */
+	ASSERT_EQ(skel->bss->called_socket_clone, 1, "called_clone");
+
+	/* AF_UNIX+SOCK_STREAM (failed)
+	 * AF_INET6+SOCK_STREAM
+	 * AF_PACKET+SOCK_RAW (failed)
+	 * AF_PACKET+SOCK_RAW
+	 * listen_fd
+	 * client_fd
+	 * accepted_fd
+	 */
+	ASSERT_EQ(skel->bss->called_socket_alloc, 7, "called_alloc");
+
+	close(listen_fd);
+	close(client_fd);
+	close(accepted_fd);
+
+	/* Make sure other cgroup doesn't trigger the programs. */
+
+	if (!ASSERT_OK(join_cgroup("/sock_policy_empty"), "join root cgroup"))
+		goto detach_cgroup;
+
+	fd = socket(AF_INET6, SOCK_STREAM, 0);
+	if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
+		goto detach_cgroup;
+
+	prio = 0;
+	socklen = sizeof(prio);
+	ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+		  "getsockopt");
+	ASSERT_EQ(prio, 0, "sk_priority");
+
+	close(fd);
+
+detach_cgroup:
+	ASSERT_GE(bpf_prog_detach2(post_create_prog_fd2, cgroup_fd,
+				   BPF_LSM_CGROUP), 0, "detach_create");
+	close(bind_link_fd);
+	/* Don't close bind_link_fd2, exercise cgroup release cleanup. */
+	ASSERT_GE(bpf_prog_detach2(alloc_prog_fd, cgroup_fd,
+				   BPF_LSM_CGROUP), 0, "detach_alloc");
+	ASSERT_GE(bpf_prog_detach2(clone_prog_fd, cgroup_fd,
+				   BPF_LSM_CGROUP), 0, "detach_clone");
+
+close_cgroup:
+	close(cgroup_fd);
+	close(cgroup_fd2);
+	close(cgroup_fd3);
+	lsm_cgroup__destroy(skel);
+}
+
+static void test_lsm_cgroup_nonvoid(void)
+{
+	struct lsm_cgroup_nonvoid *skel = NULL;
+
+	skel = lsm_cgroup_nonvoid__open_and_load();
+	ASSERT_NULL(skel, "open succeeds");
+	lsm_cgroup_nonvoid__destroy(skel);
+}
+
+void test_lsm_cgroup(void)
+{
+	if (test__start_subtest("functional"))
+		test_lsm_cgroup_functional();
+	if (test__start_subtest("nonvoid"))
+		test_lsm_cgroup_nonvoid();
+	btf__free(btf);
+}
@@ -44,7 +44,7 @@ BTF_ID(union, U)
 BTF_ID(func, func)

 extern __u32 test_list_global[];
-BTF_ID_LIST_GLOBAL(test_list_global)
+BTF_ID_LIST_GLOBAL(test_list_global, 1)
 BTF_ID_UNUSED
 BTF_ID(typedef, S)
 BTF_ID(typedef, T)
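The added argument reflects that BTF_ID_LIST_GLOBAL takes an element count, which matters on builds where the IDs cannot be resolved from BTF and the macro degrades to reserving plain storage. Roughly, paraphrasing include/linux/btf_ids.h (verify against the tree you build):

	/* CONFIG_DEBUG_INFO_BTF=y: IDs are patched in by resolve_btfids */
	#define BTF_ID_LIST_GLOBAL(name, n) __BTF_ID_LIST(name, globl)
	/* no BTF: just reserve n zeroed slots, hence the explicit count */
	#define BTF_ID_LIST_GLOBAL(name, n) u32 name[n];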
@@ -394,7 +394,6 @@ void serial_test_sock_fields(void)
 	test();

 done:
 	test_sock_fields__detach(skel);
 	test_sock_fields__destroy(skel);
 	if (child_cg_fd >= 0)
 		close(child_cg_fd);
@@ -12,7 +12,7 @@ int lets_test_this(int);

 static volatile int idx = 2;
 static volatile __u64 bla = 0xFEDCBA9876543210ULL;
-static volatile short nums[] = {-1, -2, -3, };
+static volatile short nums[] = {-1, -2, -3, -4};

 static volatile struct {
 	int x;
@@ -63,7 +63,7 @@ static bool expect_str(char *buf, size_t size, const char *str, const char *name
 static void test_synproxy(bool xdp)
 {
 	int server_fd = -1, client_fd = -1, accept_fd = -1;
-	char *prog_id, *prog_id_end;
+	char *prog_id = NULL, *prog_id_end;
 	struct nstoken *ns = NULL;
 	FILE *ctrl_file = NULL;
 	char buf[CMD_OUT_BUF_SIZE];
@@ -11,11 +11,19 @@ struct callback_ctx {
 	int output;
 };

+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 32);
+	__type(key, int);
+	__type(value, int);
+} map1 SEC(".maps");
+
 /* These should be set by the user program */
 u32 nested_callback_nr_loops;
 u32 stop_index = -1;
 u32 nr_loops;
 int pid;
+int callback_selector;

 /* Making these global variables so that the userspace program
  * can verify the output through the skeleton
@@ -111,3 +119,109 @@ int prog_nested_calls(void *ctx)

 	return 0;
 }
+
+static int callback_set_f0(int i, void *ctx)
+{
+	g_output = 0xF0;
+	return 0;
+}
+
+static int callback_set_0f(int i, void *ctx)
+{
+	g_output = 0x0F;
+	return 0;
+}
+
+/*
+ * non-constant callback is a corner case for bpf_loop inline logic
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_non_constant_callback(void *ctx)
+{
+	struct callback_ctx data = {};
+
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 0;
+
+	int (*callback)(int i, void *ctx);
+
+	g_output = 0;
+
+	if (callback_selector == 0x0F)
+		callback = callback_set_0f;
+	else
+		callback = callback_set_f0;
+
+	bpf_loop(1, callback, NULL, 0);
+
+	return 0;
+}
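prog_non_constant_callback deliberately defeats the new bpf_loop() inlining: the callback pointer is only known at run time, so the verifier has to keep the real helper call. For contrast, a sketch of the statically known form the inliner targets (illustrative callback, in the style of the helpers above):

	static int square(int i, void *ctx)
	{
		g_output += i * i;
		return 0;	/* returning 1 would break out of the loop */
	}

	SEC("fentry/" SYS_PREFIX "sys_nanosleep")
	int prog_constant_callback(void *ctx)
	{
		/* the callback is a compile-time constant, so the verifier
		 * can inline the loop body instead of emitting a helper call
		 */
		bpf_loop(1, square, NULL, 0);
		return 0;
	}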
+
+static int stack_check_inner_callback(void *ctx)
+{
+	return 0;
+}
+
+static int map1_lookup_elem(int key)
+{
+	int *val = bpf_map_lookup_elem(&map1, &key);
+
+	return val ? *val : -1;
+}
+
+static void map1_update_elem(int key, int val)
+{
+	bpf_map_update_elem(&map1, &key, &val, BPF_ANY);
+}
+
+static int stack_check_outer_callback(void *ctx)
+{
+	int a = map1_lookup_elem(1);
+	int b = map1_lookup_elem(2);
+	int c = map1_lookup_elem(3);
+	int d = map1_lookup_elem(4);
+	int e = map1_lookup_elem(5);
+	int f = map1_lookup_elem(6);
+
+	bpf_loop(1, stack_check_inner_callback, NULL, 0);
+
+	map1_update_elem(1, a + 1);
+	map1_update_elem(2, b + 1);
+	map1_update_elem(3, c + 1);
+	map1_update_elem(4, d + 1);
+	map1_update_elem(5, e + 1);
+	map1_update_elem(6, f + 1);
+
+	return 0;
+}
+
+/* Some of the local variables in stack_check and
+ * stack_check_outer_callback would be allocated on stack by
+ * compiler. This test should verify that stack content for these
+ * variables is preserved between calls to bpf_loop (might be an issue
+ * if loop inlining allocates stack slots incorrectly).
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int stack_check(void *ctx)
+{
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 0;
+
+	int a = map1_lookup_elem(7);
+	int b = map1_lookup_elem(8);
+	int c = map1_lookup_elem(9);
+	int d = map1_lookup_elem(10);
+	int e = map1_lookup_elem(11);
+	int f = map1_lookup_elem(12);
+
+	bpf_loop(1, stack_check_outer_callback, NULL, 0);
+
+	map1_update_elem(7, a + 1);
+	map1_update_elem(8, b + 1);
+	map1_update_elem(9, c + 1);
+	map1_update_elem(10, d + 1);
+	map1_update_elem(11, e + 1);
+	map1_update_elem(12, f + 1);
+
+	return 0;
+}
Some files were not shown because too many files have changed in this diff.