LIBBPF_0.0.1 {
    global:
        bpf_btf_get_fd_by_id;
        bpf_create_map;
        bpf_create_map_in_map;
        bpf_create_map_in_map_node;
        bpf_create_map_name;
        bpf_create_map_node;
        bpf_create_map_xattr;
        bpf_load_btf;
        bpf_load_program;
        bpf_load_program_xattr;
        bpf_map__btf_key_type_id;
        bpf_map__btf_value_type_id;
        bpf_map__def;
        bpf_map__fd;
        bpf_map__is_offload_neutral;
        bpf_map__name;
        bpf_map__next;
        bpf_map__pin;
        bpf_map__prev;
        bpf_map__priv;
        bpf_map__reuse_fd;
        bpf_map__set_ifindex;
        bpf_map__set_inner_map_fd;
        bpf_map__set_priv;
        bpf_map__unpin;
        bpf_map_delete_elem;
        bpf_map_get_fd_by_id;
        bpf_map_get_next_id;
        bpf_map_get_next_key;
        bpf_map_lookup_and_delete_elem;
        bpf_map_lookup_elem;
        bpf_map_update_elem;
        bpf_obj_get;
        bpf_obj_get_info_by_fd;
        bpf_obj_pin;
        bpf_object__btf_fd;
        bpf_object__close;
        bpf_object__find_map_by_name;
        bpf_object__find_map_by_offset;
        bpf_object__find_program_by_title;
        bpf_object__kversion;
        bpf_object__load;
        bpf_object__name;
        bpf_object__next;
        bpf_object__open;
        bpf_object__open_buffer;
        bpf_object__open_xattr;
        bpf_object__pin;
        bpf_object__pin_maps;
        bpf_object__pin_programs;
        bpf_object__priv;
        bpf_object__set_priv;
        bpf_object__unload;
        bpf_object__unpin_maps;
        bpf_object__unpin_programs;
        bpf_perf_event_read_simple;
        bpf_prog_attach;
        bpf_prog_detach;
        bpf_prog_detach2;
        bpf_prog_get_fd_by_id;
        bpf_prog_get_next_id;
        bpf_prog_load;
        bpf_prog_load_xattr;
        bpf_prog_query;
        bpf_prog_test_run;
        bpf_prog_test_run_xattr;
        bpf_program__fd;
        bpf_program__is_kprobe;
        bpf_program__is_perf_event;
        bpf_program__is_raw_tracepoint;
        bpf_program__is_sched_act;
        bpf_program__is_sched_cls;
        bpf_program__is_socket_filter;
        bpf_program__is_tracepoint;
        bpf_program__is_xdp;
        bpf_program__load;
        bpf_program__next;
        bpf_program__nth_fd;
        bpf_program__pin;
        bpf_program__pin_instance;
        bpf_program__prev;
        bpf_program__priv;
        bpf_program__set_expected_attach_type;
        bpf_program__set_ifindex;
        bpf_program__set_kprobe;
        bpf_program__set_perf_event;
        bpf_program__set_prep;
        bpf_program__set_priv;
        bpf_program__set_raw_tracepoint;
        bpf_program__set_sched_act;
        bpf_program__set_sched_cls;
        bpf_program__set_socket_filter;
        bpf_program__set_tracepoint;
        bpf_program__set_type;
        bpf_program__set_xdp;
        bpf_program__title;
        bpf_program__unload;
        bpf_program__unpin;
        bpf_program__unpin_instance;
        bpf_prog_linfo__free;
        bpf_prog_linfo__new;
        bpf_prog_linfo__lfind_addr_func;
        bpf_prog_linfo__lfind;
        bpf_raw_tracepoint_open;
        bpf_set_link_xdp_fd;
        bpf_task_fd_query;
        bpf_verify_program;
        btf__fd;
        btf__find_by_name;
        btf__free;
        btf__get_from_id;
        btf__name_by_offset;
        btf__new;
        btf__resolve_size;
        btf__resolve_type;
        btf__type_by_id;
        libbpf_attach_type_by_name;
        libbpf_get_error;
        libbpf_prog_type_by_name;
        libbpf_set_print;
        libbpf_strerror;
    local:
        *;
};
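
/*
 * A minimal usage sketch of the core LIBBPF_0.0.1 object/map APIs listed
 * above (not part of the version script). The object path, map name, and
 * value are illustrative assumptions, not libbpf symbols:
 *
 *     #include <bpf/libbpf.h>
 *     #include <bpf/bpf.h>
 *
 *     struct bpf_object *obj = bpf_object__open("prog.o");
 *     if (libbpf_get_error(obj))
 *             return 1;
 *     if (bpf_object__load(obj))
 *             return 1;
 *
 *     struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *     int key = 0, value = 42;
 *
 *     bpf_map_update_elem(bpf_map__fd(map), &key, &value, BPF_ANY);
 *     bpf_object__close(obj);
 */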

LIBBPF_0.0.2 {
    global:
        bpf_probe_helper;
        bpf_probe_map_type;
        bpf_probe_prog_type;
        bpf_map__resize;
        bpf_map_lookup_elem_flags;
        bpf_object__btf;
        bpf_object__find_map_fd_by_name;
        bpf_get_link_xdp_id;
        btf__dedup;
        btf__get_map_kv_tids;
        btf__get_nr_types;
        btf__get_raw_data;
        btf__load;
        btf_ext__free;
        btf_ext__func_info_rec_size;
        btf_ext__get_raw_data;
        btf_ext__line_info_rec_size;
        btf_ext__new;
        btf_ext__reloc_func_info;
        btf_ext__reloc_line_info;
        xsk_umem__create;
        xsk_socket__create;
        xsk_umem__delete;
        xsk_socket__delete;
        xsk_umem__fd;
        xsk_socket__fd;
        bpf_program__get_prog_info_linear;
        bpf_program__bpil_addr_to_offs;
        bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;
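
/*
 * The bpf_probe_*() symbols are the feature probes behind "bpftool feature
 * probe": each reports whether the running kernel accepts a given program
 * type, map type, or helper/program-type combination. A rough sketch:
 *
 *     #include <bpf/libbpf.h>
 *
 *     bool has_sf     = bpf_probe_prog_type(BPF_PROG_TYPE_SOCKET_FILTER, 0);
 *     bool has_hash   = bpf_probe_map_type(BPF_MAP_TYPE_HASH, 0);
 *     bool has_lookup = bpf_probe_helper(BPF_FUNC_map_lookup_elem,
 *                                        BPF_PROG_TYPE_KPROBE, 0);
 */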

LIBBPF_0.0.3 {
    global:
        bpf_map__is_internal;
        bpf_map_freeze;
        btf__finalize_data;
} LIBBPF_0.0.2;
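
/*
 * LIBBPF_0.0.3 accompanies global-data support: the .data, .bss and
 * .rodata ELF sections become internal single-entry array maps, with the
 * .rodata map frozen read-only via bpf_map_freeze(). A hedged sketch of
 * telling those internal maps apart from user-defined ones:
 *
 *     struct bpf_map *map;
 *
 *     for (map = bpf_map__next(NULL, obj); map;
 *          map = bpf_map__next(map, obj)) {
 *             if (bpf_map__is_internal(map))
 *                     continue;
 *             printf("user map: %s\n", bpf_map__name(map));
 *     }
 */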

LIBBPF_0.0.4 {
    global:
        bpf_link__destroy;
        bpf_object__load_xattr;
        bpf_program__attach_kprobe;
        bpf_program__attach_perf_event;
        bpf_program__attach_raw_tracepoint;
        bpf_program__attach_tracepoint;
        bpf_program__attach_uprobe;
        btf_dump__dump_type;
        btf_dump__free;
        btf_dump__new;
        btf__parse_elf;
        libbpf_num_possible_cpus;
        perf_buffer__free;
        perf_buffer__new;
        perf_buffer__new_raw;
        perf_buffer__poll;
        xsk_umem__create;
} LIBBPF_0.0.3;
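
/*
 * Sketch of the 0.0.4-era attach/perf-buffer flow. The perf_buffer__new()
 * signature shown (callbacks passed via perf_buffer_opts) is the one from
 * this era; it changed in later libbpf releases. The callback name, kprobe
 * target, and events map are illustrative assumptions:
 *
 *     static void handle_event(void *ctx, int cpu, void *data, __u32 size)
 *     {
 *     }
 *
 *     struct perf_buffer_opts pb_opts = { .sample_cb = handle_event };
 *     struct bpf_link *link;
 *     struct perf_buffer *pb;
 *
 *     link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *     pb = perf_buffer__new(bpf_map__fd(events_map), 8, &pb_opts);
 *     while (perf_buffer__poll(pb, 100) >= 0)
 *             ;
 *     perf_buffer__free(pb);
 *     bpf_link__destroy(link);
 */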

LIBBPF_0.0.5 {
    global:
        bpf_btf_get_next_id;
} LIBBPF_0.0.4;
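
/*
 * bpf_btf_get_next_id() completes the get_next_id family alongside
 * bpf_prog_get_next_id() and bpf_map_get_next_id(): it walks the IDs of
 * all BTF objects loaded in the kernel. A small sketch:
 *
 *     __u32 id = 0;
 *
 *     while (!bpf_btf_get_next_id(id, &id)) {
 *             int fd = bpf_btf_get_fd_by_id(id);
 *
 *             if (fd < 0)
 *                     continue;
 *             close(fd);
 *     }
 */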

LIBBPF_0.0.6 {
    global:
        bpf_get_link_xdp_info;
        bpf_map__get_pin_path;
        bpf_map__is_pinned;
        bpf_map__set_pin_path;
        bpf_object__open_file;
        bpf_object__open_mem;
        bpf_program__attach_trace;
        bpf_program__get_expected_attach_type;
        bpf_program__get_type;
        bpf_program__is_tracing;
        bpf_program__set_tracing;
        bpf_program__size;
        btf__find_by_name_kind;
        libbpf_find_vmlinux_btf_id;
} LIBBPF_0.0.5;
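
/*
 * bpf_object__open_file()/bpf_object__open_mem() take a NULL-able,
 * extensible options struct whose first field records the struct's own
 * size, so new fields can be appended later without breaking ABI or
 * versioning the symbol. Sketch (the object name is an arbitrary example):
 *
 *     DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *             .object_name = "my_object",
 *     );
 *     struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
 *
 * Passing NULL instead of &opts keeps the default bpf_object__open()
 * behaviour.
 */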

LIBBPF_0.0.7 {
    global:
        bpf_program__attach;
} LIBBPF_0.0.6;
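
/*
 * bpf_program__attach() infers the attach method from the program's type
 * and section name, covering the per-type attach calls above through one
 * entry point. Sketch:
 *
 *     struct bpf_link *link = bpf_program__attach(prog);
 *
 *     if (libbpf_get_error(link))
 *             return 1;
 *     bpf_link__destroy(link);
 */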