LIBBPF_0.0.1 {
	global:
		bpf_btf_get_fd_by_id;
		bpf_create_map;
		bpf_create_map_in_map;
		bpf_create_map_in_map_node;
		bpf_create_map_name;
		bpf_create_map_node;
		bpf_create_map_xattr;
		bpf_load_btf;
		bpf_load_program;
		bpf_load_program_xattr;
		bpf_map__btf_key_type_id;
		bpf_map__btf_value_type_id;
		bpf_map__def;
		bpf_map__fd;
		bpf_map__is_offload_neutral;
		bpf_map__name;
		bpf_map__next;
		bpf_map__pin;
		bpf_map__prev;
		bpf_map__priv;
		bpf_map__reuse_fd;
		bpf_map__set_ifindex;
		bpf_map__set_inner_map_fd;
		bpf_map__set_priv;
		bpf_map__unpin;
		bpf_map_delete_elem;
		bpf_map_get_fd_by_id;
		bpf_map_get_next_id;
		bpf_map_get_next_key;
		bpf_map_lookup_and_delete_elem;
		bpf_map_lookup_elem;
		bpf_map_update_elem;
		bpf_obj_get;
		bpf_obj_get_info_by_fd;
		bpf_obj_pin;
		bpf_object__btf_fd;
		bpf_object__close;
		bpf_object__find_map_by_name;
		bpf_object__find_map_by_offset;
		bpf_object__find_program_by_title;
		bpf_object__kversion;
		bpf_object__load;
		bpf_object__name;
		bpf_object__next;
		bpf_object__open;
		bpf_object__open_buffer;
		bpf_object__open_xattr;
		bpf_object__pin;
		bpf_object__pin_maps;
		bpf_object__pin_programs;
		bpf_object__priv;
		bpf_object__set_priv;
		bpf_object__unload;
		bpf_object__unpin_maps;
		bpf_object__unpin_programs;
		bpf_perf_event_read_simple;
		bpf_prog_attach;
		bpf_prog_detach;
		bpf_prog_detach2;
		bpf_prog_get_fd_by_id;
		bpf_prog_get_next_id;
		bpf_prog_load;
		bpf_prog_load_xattr;
		bpf_prog_query;
		bpf_prog_test_run;
		bpf_prog_test_run_xattr;
		bpf_program__fd;
		bpf_program__is_kprobe;
		bpf_program__is_perf_event;
		bpf_program__is_raw_tracepoint;
		bpf_program__is_sched_act;
		bpf_program__is_sched_cls;
		bpf_program__is_socket_filter;
		bpf_program__is_tracepoint;
		bpf_program__is_xdp;
		bpf_program__load;
		bpf_program__next;
		bpf_program__nth_fd;
		bpf_program__pin;
		bpf_program__pin_instance;
		bpf_program__prev;
		bpf_program__priv;
		bpf_program__set_expected_attach_type;
		bpf_program__set_ifindex;
		bpf_program__set_kprobe;
		bpf_program__set_perf_event;
		bpf_program__set_prep;
		bpf_program__set_priv;
		bpf_program__set_raw_tracepoint;
		bpf_program__set_sched_act;
		bpf_program__set_sched_cls;
		bpf_program__set_socket_filter;
		bpf_program__set_tracepoint;
		bpf_program__set_type;
		bpf_program__set_xdp;
		bpf_program__title;
		bpf_program__unload;
		bpf_program__unpin;
		bpf_program__unpin_instance;
		bpf_prog_linfo__free;
		bpf_prog_linfo__new;
		bpf_prog_linfo__lfind_addr_func;
		bpf_prog_linfo__lfind;
		bpf_raw_tracepoint_open;
		bpf_set_link_xdp_fd;
		bpf_task_fd_query;
		bpf_verify_program;
		btf__fd;
		btf__find_by_name;
		btf__free;
		btf__get_from_id;
		btf__name_by_offset;
		btf__new;
		btf__resolve_size;
		btf__resolve_type;
		btf__type_by_id;
		libbpf_attach_type_by_name;
		libbpf_get_error;
		libbpf_prog_type_by_name;
		libbpf_set_print;
		libbpf_strerror;
	local:
		*;
};

LIBBPF_0.0.2 {
	global:
		bpf_probe_helper;
		bpf_probe_map_type;
		bpf_probe_prog_type;
		bpf_map__resize;
		bpf_map_lookup_elem_flags;
		bpf_object__btf;
		bpf_object__find_map_fd_by_name;
		bpf_get_link_xdp_id;
		btf__dedup;
		btf__get_map_kv_tids;
		btf__get_nr_types;
		btf__get_raw_data;
		btf__load;
		btf_ext__free;
		btf_ext__func_info_rec_size;
		btf_ext__get_raw_data;
		btf_ext__line_info_rec_size;
		btf_ext__new;
		btf_ext__reloc_func_info;
		btf_ext__reloc_line_info;
		xsk_umem__create;
		xsk_socket__create;
		xsk_umem__delete;
		xsk_socket__delete;
		xsk_umem__fd;
		xsk_socket__fd;
		bpf_program__get_prog_info_linear;
		bpf_program__bpil_addr_to_offs;
		bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;

LIBBPF_0.0.3 {
	global:
		bpf_map__is_internal;
		bpf_map_freeze;
		btf__finalize_data;
} LIBBPF_0.0.2;

LIBBPF_0.0.4 {
	global:
		bpf_link__destroy;
		bpf_object__load_xattr;
		bpf_program__attach_kprobe;
		bpf_program__attach_perf_event;
		bpf_program__attach_raw_tracepoint;
		bpf_program__attach_tracepoint;
		bpf_program__attach_uprobe;
		btf_dump__dump_type;
		btf_dump__free;
		btf_dump__new;
		btf__parse_elf;
		libbpf_num_possible_cpus;
		perf_buffer__free;
		perf_buffer__new;
		perf_buffer__new_raw;
		perf_buffer__poll;
		xsk_umem__create;
} LIBBPF_0.0.3;

LIBBPF_0.0.5 {
	global:
		bpf_btf_get_next_id;
} LIBBPF_0.0.4;

LIBBPF_0.0.6 {
	global:
		bpf_get_link_xdp_info;
		bpf_map__get_pin_path;
		bpf_map__is_pinned;
		bpf_map__set_pin_path;
		bpf_object__open_file;
		bpf_object__open_mem;
		bpf_program__attach_trace;
		bpf_program__get_expected_attach_type;
		bpf_program__get_type;
		bpf_program__is_tracing;
		bpf_program__set_tracing;
		bpf_program__size;
		btf__find_by_name_kind;
		libbpf_find_vmlinux_btf_id;
} LIBBPF_0.0.5;

LIBBPF_0.0.7 {
	global:
		btf_dump__emit_type_decl;
		bpf_link__disconnect;
		bpf_map__attach_struct_ops;
		bpf_map_delete_batch;
		bpf_map_lookup_and_delete_batch;
		bpf_map_lookup_batch;
		bpf_map_update_batch;
		bpf_object__find_program_by_name;
		bpf_object__attach_skeleton;
		bpf_object__destroy_skeleton;
		bpf_object__detach_skeleton;
		bpf_object__load_skeleton;
		bpf_object__open_skeleton;
		bpf_probe_large_insn_limit;
		bpf_prog_attach_xattr;
		bpf_program__attach;
		bpf_program__name;
		bpf_program__is_extension;
		bpf_program__is_struct_ops;
		bpf_program__set_extension;
		bpf_program__set_struct_ops;
		btf__align_of;
		libbpf_find_kernel_btf;
} LIBBPF_0.0.6;

LIBBPF_0.0.8 {
	global:
		bpf_link__fd;
		bpf_link__open;
		bpf_link__pin;
		bpf_link__pin_path;
		bpf_link__unpin;
		bpf_link__update_program;
		bpf_link_create;
		bpf_link_update;
		bpf_map__set_initial_value;
		bpf_program__attach_cgroup;
		bpf_program__attach_lsm;
		bpf_program__is_lsm;
		bpf_program__set_attach_target;
		bpf_program__set_lsm;
		bpf_set_link_xdp_fd_opts;
} LIBBPF_0.0.7;

LIBBPF_0.0.9 {
	global:
		bpf_enable_stats;
		bpf_iter_create;
		bpf_link_get_fd_by_id;
		bpf_link_get_next_id;
		bpf_program__attach_iter;
		bpf_program__attach_netns;
		perf_buffer__consume;
		ring_buffer__add;
		ring_buffer__consume;
		ring_buffer__free;
		ring_buffer__new;
		ring_buffer__poll;
} LIBBPF_0.0.8;

LIBBPF_0.1.0 {
	global:
		bpf_link__detach;
		bpf_link_detach;
		bpf_map__ifindex;
		bpf_map__key_size;
		bpf_map__map_flags;
		bpf_map__max_entries;
		bpf_map__numa_node;
		bpf_map__set_key_size;
		bpf_map__set_map_flags;
		bpf_map__set_max_entries;
		bpf_map__set_numa_node;
		bpf_map__set_type;
		bpf_map__set_value_size;
		bpf_map__type;
		bpf_map__value_size;
		bpf_program__attach_xdp;
		bpf_program__autoload;
		bpf_program__is_sk_lookup;
		bpf_program__set_autoload;
		bpf_program__set_sk_lookup;
		btf__parse;
		btf__parse_raw;
		btf__pointer_size;
		btf__set_fd;
		btf__set_pointer_size;
} LIBBPF_0.0.9;

LIBBPF_0.2.0 {
	global:
		bpf_prog_bind_map;
		bpf_prog_test_run_opts;
		bpf_program__attach_freplace;
		bpf_program__section_name;
libbpf: Add BTF writing APIs
Add APIs for appending new BTF types at the end of a BTF object.
Each BTF kind has a corresponding API of the form btf__add_<kind>(). For types
that have a variable number of additional items (struct/union, enum, func_proto,
datasec), an additional API is provided to emit each such item. E.g., for
emitting a struct, one would use the following sequence of API calls:
btf__add_struct(...);
btf__add_field(...);
...
btf__add_field(...);
Each btf__add_field() call ensures that the last BTF type is of STRUCT or
UNION kind and automatically increments that type's vlen field.
All strings are provided as C strings (const char *), not as string offsets.
This significantly improves the usability of the BTF writer APIs. All such strings
are automatically appended to the string section, or an existing string is
re-used if it was already added previously.
Each API performs all reasonable validations, like enforcing
non-empty names for entities with required names, proper value bounds, various
bit offset restrictions, etc.
Type ID validation is minimal because it's possible to emit a type that refers
to a type that will be emitted later, so libbpf has no way to enforce such
cases. The user must be careful to properly emit all the necessary types and to
specify type IDs that will be valid in the final generated BTF.
Each of the btf__add_<kind>() APIs returns the new type ID on success or a negative
value on error. APIs like btf__add_field() that emit additional items
return zero on success and a negative value on error.
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200929020533.711288-2-andriin@fb.com
2020-09-29 10:05:30 +08:00
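As a small, hedged illustration of the call sequence above (not taken from the patch), the following builds BTF for `struct pair { int a; int b; }` from scratch; the type and field names are assumptions for the example, and error checks on the add calls are omitted.

    #include <bpf/btf.h>
    #include <linux/btf.h> /* BTF_INT_SIGNED */

    /* Build BTF describing: struct pair { int a; int b; }; */
    static struct btf *build_pair_btf(void)
    {
            struct btf *btf = btf__new_empty();
            int int_id;

            if (!btf)
                    return NULL;

            int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* new type ID */
            btf__add_struct(btf, "pair", 8);         /* 8-byte struct, becomes "last" type */
            btf__add_field(btf, "a", int_id, 0, 0);  /* bit offset 0, not a bitfield */
            btf__add_field(btf, "b", int_id, 32, 0); /* bit offset 32 */
            return btf;
    }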
|
|
|
btf__add_array;
|
|
|
|
btf__add_const;
|
|
|
|
btf__add_enum;
|
|
|
|
btf__add_enum_value;
|
|
|
|
btf__add_datasec;
|
|
|
|
btf__add_datasec_var_info;
|
|
|
|
btf__add_field;
|
|
|
|
btf__add_func;
|
|
|
|
btf__add_func_param;
|
|
|
|
btf__add_func_proto;
|
|
|
|
btf__add_fwd;
|
|
|
|
btf__add_int;
|
|
|
|
btf__add_ptr;
|
|
|
|
btf__add_restrict;
|
libbpf: Allow modification of BTF and add btf__add_str API
Allow the internal BTF representation to switch from the default read-only mode, in
which raw BTF data is a single non-modifiable block of memory with the BTF header,
types, and strings laid out sequentially and contiguously in memory, into
a writable representation with types and strings data split out into separate
memory regions that can be dynamically expanded.
Such a writable internal representation is transparent to users of libbpf APIs,
but allows appending new types and strings at the end of the BTF, which is
a typical use case when generating BTF programmatically. All the basic
guarantees of BTF type and string layout are preserved, i.e., the user can get
a `struct btf_type *` pointer and read it directly. Such btf_type pointers might
be invalidated if the BTF is modified, so some care is required in such mixed
read/write scenarios.
The switch from the read-only to the writable configuration happens automatically
the first time the user attempts to modify the BTF by adding either a new type or
a new string. It is still possible to get the raw BTF data, which is a single piece
of memory that can be persisted in an ELF section or written to a file as raw BTF.
Such raw data memory is also still owned by the BTF and will be freed either when
the BTF object is freed or when another modification to the BTF happens, as any
modification invalidates the BTF raw representation.
This patch adds the first two BTF manipulation APIs: btf__add_str(), which
allows adding arbitrary strings to the BTF string section, and btf__find_str(),
which finds an existing string's offset but does not add it if it's missing.
All added strings are automatically deduplicated. This is achieved by
maintaining an additional string lookup index for all unique strings. This
index is built when the BTF is switched to modifiable mode. If at that time the
BTF string section contained duplicate strings, they are not de-duplicated. This
is done specifically to avoid modifying the existing content of the BTF (types,
their string offsets, etc.), which could cause confusion and is an especially
important property if there is a struct btf_ext associated with the struct btf.
By following this "imperfect deduplication" process, btf_ext is kept consistent
and correct.
If deduplication of strings is necessary, it can be forced by doing BTF
deduplication, at which point all the strings will be eagerly deduplicated and
all string offsets, both in struct btf and struct btf_ext, will be updated.
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200926011357.2366158-6-andriin@fb.com
2020-09-26 09:13:53 +08:00
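A brief sketch of the two new string APIs (assumed usage, not from the patch); `btf` is any struct btf instance that is, or can become, writable, and the string literals are placeholders.

    #include <bpf/btf.h>

    static void string_demo(struct btf *btf)
    {
            /* Adds "tcp_sock" to the string section (or re-uses it) and
             * returns its offset; adding the same string again returns
             * the same offset thanks to deduplication.
             */
            int off1 = btf__add_str(btf, "tcp_sock");
            int off2 = btf__add_str(btf, "tcp_sock"); /* == off1 */

            /* Only looks the string up; returns a negative error
             * (-ENOENT) if it was never added.
             */
            int off3 = btf__find_str(btf, "no_such_string");

            (void)off1; (void)off2; (void)off3;
    }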
|
|
|
btf__add_str;
|
libbpf: Add BTF writing APIs
2020-09-29 10:05:30 +08:00
|
|
|
btf__add_struct;
|
|
|
|
btf__add_typedef;
|
|
|
|
btf__add_union;
|
|
|
|
btf__add_var;
|
|
|
|
btf__add_volatile;
|
libbpf: Support BTF loading and raw data output in both endianness
Teach BTF to recognize non-native endianness and transparently convert it
internally to host endianness. The original endianness of the BTF is preserved
and used during btf__get_raw_data() to convert the resulting raw data to the same
endianness as the source raw_data. This means that a little-endian host can parse
big-endian BTF with no issues: all the type data will be presented to the
client application in native endianness, but when it's time to emit BTF
to persist it in a file (e.g., after BTF deduplication), the original non-native
endianness is preserved and stored.
It's possible to query the original endianness of BTF data with the new
btf__endianness() API. It's also possible to override the desired output
endianness with btf__set_endianness(), so that if an application needs to load,
say, big-endian BTF and store it as little-endian BTF, it can
manually override this. If btf__set_endianness() was used to change the
endianness, btf__endianness() will reflect the overridden endianness.
Given that there are no known use cases for supporting cross-endianness for
.BTF.ext, loading .BTF.ext in non-native endianness is not supported.
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200929043046.1324350-3-andriin@fb.com
2020-09-29 12:30:45 +08:00
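A hedged sketch of the endianness workflow described above; it assumes `btf` was obtained with btf__parse_raw() or similar, and that the caller keeps `btf` alive while using the returned raw buffer.

    #include <bpf/btf.h>

    /* Take BTF parsed from either endianness and re-emit it as
     * little-endian raw data.
     */
    static const void *emit_le_btf(struct btf *btf, __u32 *size)
    {
            if (btf__endianness(btf) == BTF_BIG_ENDIAN)
                    btf__set_endianness(btf, BTF_LITTLE_ENDIAN);

            /* The returned buffer is owned by 'btf' and is emitted in the
             * (possibly overridden) output endianness.
             */
            return btf__get_raw_data(btf, size);
    }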
|
|
|
btf__endianness;
|
libbpf: Allow modification of BTF and add btf__add_str API
2020-09-26 09:13:53 +08:00
|
|
|
btf__find_str;
|
2020-09-26 09:13:54 +08:00
|
|
|
btf__new_empty;
|
libbpf: Support BTF loading and raw data output in both endianness
2020-09-29 12:30:45 +08:00
|
|
|
btf__set_endianness;
|
2020-09-29 10:05:31 +08:00
|
|
|
btf__str_by_offset;
|
libbpf: Add perf_buffer APIs for better integration with outside epoll loop
2020-08-22 00:59:27 +08:00
|
|
|
perf_buffer__buffer_cnt;
|
|
|
|
perf_buffer__buffer_fd;
|
|
|
|
perf_buffer__epoll_fd;
|
|
|
|
perf_buffer__consume_buffer;
|
2020-08-28 16:26:27 +08:00
|
|
|
xsk_socket__create_shared;
|
libbpf: Add perf_buffer APIs for better integration with outside epoll loop
2020-08-22 00:59:27 +08:00
|
|
|
} LIBBPF_0.1.0;
|
libbpf: Implement basic split BTF support
Support split BTF operation, in which one BTF (the base BTF) provides a basic set of
types and strings, while another one (the split BTF) builds on top of the base's
types and strings and adds its own new types and strings. From an API standpoint,
the fact that the split BTF is built on top of the base BTF is transparent.
Type numbering is transparent. If the base BTF's last type ID is #N, then all
types in the split BTF start at type ID N+1. Any type in the split BTF can
reference base BTF types, but not vice versa. Programmatic construction of
a split BTF on top of a base BTF is supported: one can create an empty split
BTF with btf__new_empty_split() and pass the base BTF as an input, or pass raw
binary data to btf__new_split(), or use the btf__parse_xxx_split() variants to get
an initial set of split types/strings from an ELF file with a .BTF section.
String offsets are similarly transparent and are a logical continuation of the
base BTF's strings. When building BTF programmatically and adding a new string
(explicitly with btf__add_str() or implicitly through appending new
types/members), the string to be added is first looked up in the base
BTF's string section and re-used if it's there; if not, it is looked up
and/or added to the split BTF string section. Similarly to type IDs, types in the
split BTF can refer to strings from the base BTF absolutely transparently (but not
vice versa, of course, because the base BTF doesn't "know" about the existence of
the split BTF).
The internal type index is slightly adjusted to be zero-indexed, ignoring the fake
[0] VOID type. This allows handling split/base BTF type lookups transparently
by using the btf->start_id type ID offset, which is always 1 for base/non-split
BTF and equals btf__get_nr_types(base_btf) + 1 for the split BTF.
BTF deduplication is not yet supported for split BTF; support for it will
be added in a separate patch.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20201105043402.2530976-5-andrii@kernel.org
2020-11-05 12:33:54 +08:00
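A minimal sketch of programmatic split BTF construction, assuming kernel BTF at /sys/kernel/btf/vmlinux as the base; the struct and field names are illustrative, and error checks on the lookup/add calls are omitted.

    #include <bpf/btf.h>
    #include <linux/btf.h> /* BTF_KIND_INT */

    static struct btf *build_split_btf(void)
    {
            struct btf *base, *split;
            int int_id;

            base = btf__parse("/sys/kernel/btf/vmlinux", NULL);
            if (!base)
                    return NULL;

            /* Types added to 'split' get IDs continuing after base's last ID. */
            split = btf__new_empty_split(base);
            if (!split) {
                    btf__free(base);
                    return NULL;
            }

            /* A split type may reference a base type ID directly. */
            int_id = btf__find_by_name_kind(base, "int", BTF_KIND_INT);
            btf__add_struct(split, "my_extra_struct", 4);
            btf__add_field(split, "x", int_id, 0, 0);
            return split; /* caller still owns 'base' as well */
    }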
|
|
|
|
|
|
|
LIBBPF_0.3.0 {
|
|
|
|
global:
|
2020-12-02 14:52:42 +08:00
|
|
|
btf__base_btf;
|
libbpf: Implement basic split BTF support
2020-11-05 12:33:54 +08:00
|
|
|
btf__parse_elf_split;
|
|
|
|
btf__parse_raw_split;
|
|
|
|
btf__parse_split;
|
|
|
|
btf__new_empty_split;
|
|
|
|
btf__new_split;
|
2020-12-14 19:38:12 +08:00
|
|
|
ring_buffer__epoll_fd;
|
2020-12-03 17:05:45 +08:00
|
|
|
xsk_setup_xdp_prog;
|
|
|
|
xsk_socket__update_xskmap;
|
libbpf: Implement basic split BTF support
2020-11-05 12:33:54 +08:00
|
|
|
} LIBBPF_0.2.0;
|
2021-02-27 04:22:49 +08:00
|
|
|
|
|
|
|
LIBBPF_0.4.0 {
|
|
|
|
global:
|
|
|
|
btf__add_float;
|
2021-03-19 03:40:29 +08:00
|
|
|
btf__add_type;
|
libbpf: Add BPF static linker APIs
Introduce BPF static linker APIs to libbpf. The BPF static linker performs
static linking of multiple BPF object files into a single combined
resulting object file, preserving all the BPF programs, maps, global
variables, etc.
Data sections (.bss, .data, .rodata, .maps, maps, etc.) with the same name are
concatenated together. Similarly, code sections are also concatenated. All the
symbols and ELF relocations are also concatenated in their respective ELF
sections and are adjusted accordingly to the new object file layout.
Static variables and functions are handled correctly as well, adjusting BPF
instruction offsets to reflect the new variable/function offset within the
combined ELF section. Such relocations reference STT_SECTION symbols and
that stays intact.
Data sections in different files can have different alignment requirements, so
that is taken care of as well, adjusting sizes and offsets as necessary to
satisfy both old and new alignment requirements.
DWARF data sections are currently stripped out, as is the LLVM_ADDRSIG
section, which is ignored by libbpf in bpf_object__open() anyway. So, in
a way, the BPF static linker is an analogue to `llvm-strip -g`, which is a pretty
nice property, especially if the resulting .o file is then used to generate a BPF
skeleton.
Original string sections are ignored and instead we construct our own set of
unique strings using the libbpf-internal `struct strset` API.
To reduce the size of the patch, all the .BTF and .BTF.ext processing was
moved into a separate patch.
The high-level API consists of just 4 functions:
- bpf_linker__new() creates an instance of the BPF static linker. It accepts
an output filename and a (currently empty) options struct;
- bpf_linker__add_file() takes an input filename and appends it to the already
processed ELF data; it can be called multiple times, once for each BPF
ELF object file that needs to be linked in;
- bpf_linker__finalize() needs to be called to dump the final ELF contents into
the output file specified when the bpf_linker was created; after
bpf_linker__finalize() is called, no more bpf_linker__add_file() or
bpf_linker__finalize() calls are allowed; they will return an error;
- regardless of whether bpf_linker__finalize() was called or not,
bpf_linker__free() will free up all the used resources.
Currently, the BPF static linker doesn't resolve cross-object file references
(extern variables and/or functions). This will be added in the follow-up patch
set.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210318194036.3521577-7-andrii@kernel.org
2021-03-19 03:40:30 +08:00
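A hedged sketch of the 4-step flow above; the object file names are illustrative, and the two-argument bpf_linker__add_file() call follows the API as described in this commit.

    #include <bpf/libbpf.h>

    static int link_bpf_objects(void)
    {
            struct bpf_linker *linker;
            int err;

            /* NULL opts: the options struct is currently empty. */
            linker = bpf_linker__new("combined.bpf.o", NULL);
            if (!linker)
                    return -1;

            err = bpf_linker__add_file(linker, "prog_a.bpf.o");
            if (!err)
                    err = bpf_linker__add_file(linker, "prog_b.bpf.o");
            if (!err)
                    err = bpf_linker__finalize(linker); /* writes combined.bpf.o */

            /* Safe to call whether or not finalize succeeded. */
            bpf_linker__free(linker);
            return err;
    }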
|
|
|
bpf_linker__add_file;
|
|
|
|
bpf_linker__finalize;
|
|
|
|
bpf_linker__free;
|
|
|
|
bpf_linker__new;
|
2021-04-08 14:13:08 +08:00
|
|
|
bpf_map__inner_map;
|
2021-03-23 12:09:52 +08:00
|
|
|
bpf_object__set_kversion;
|
2021-02-27 04:22:49 +08:00
|
|
|
} LIBBPF_0.3.0;
|