OpenCloudOS-Kernel/tools/lib/bpf/libbpf.map
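/*
 * Symbol version map for libbpf, passed to the linker through
 * --version-script.  Each LIBBPF_x.y.z node lists the global symbols
 * first exported in that release and inherits from the previous node,
 * so every symbol keeps the version in which it was introduced.
 */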


LIBBPF_0.0.1 {
global:
bpf_btf_get_fd_by_id;
bpf_create_map;
bpf_create_map_in_map;
bpf_create_map_in_map_node;
bpf_create_map_name;
bpf_create_map_node;
bpf_create_map_xattr;
bpf_load_btf;
bpf_load_program;
bpf_load_program_xattr;
bpf_map__btf_key_type_id;
bpf_map__btf_value_type_id;
bpf_map__def;
bpf_map__fd;
bpf_map__is_offload_neutral;
bpf_map__name;
bpf_map__next;
bpf_map__pin;
bpf_map__prev;
bpf_map__priv;
bpf_map__reuse_fd;
bpf_map__set_ifindex;
bpf_map__set_inner_map_fd;
bpf_map__set_priv;
bpf_map__unpin;
bpf_map_delete_elem;
bpf_map_get_fd_by_id;
bpf_map_get_next_id;
bpf_map_get_next_key;
bpf_map_lookup_and_delete_elem;
bpf_map_lookup_elem;
bpf_map_update_elem;
bpf_obj_get;
bpf_obj_get_info_by_fd;
bpf_obj_pin;
bpf_object__btf_fd;
bpf_object__close;
bpf_object__find_map_by_name;
bpf_object__find_map_by_offset;
bpf_object__find_program_by_title;
bpf_object__kversion;
bpf_object__load;
bpf_object__name;
bpf_object__next;
bpf_object__open;
bpf_object__open_buffer;
bpf_object__open_xattr;
bpf_object__pin;
bpf_object__pin_maps;
bpf_object__pin_programs;
bpf_object__priv;
bpf_object__set_priv;
bpf_object__unload;
bpf_object__unpin_maps;
bpf_object__unpin_programs;
bpf_perf_event_read_simple;
bpf_prog_attach;
bpf_prog_detach;
bpf_prog_detach2;
bpf_prog_get_fd_by_id;
bpf_prog_get_next_id;
bpf_prog_load;
bpf_prog_load_xattr;
bpf_prog_query;
bpf_prog_test_run;
bpf_prog_test_run_xattr;
bpf_program__fd;
bpf_program__is_kprobe;
bpf_program__is_perf_event;
bpf_program__is_raw_tracepoint;
bpf_program__is_sched_act;
bpf_program__is_sched_cls;
bpf_program__is_socket_filter;
bpf_program__is_tracepoint;
bpf_program__is_xdp;
bpf_program__load;
bpf_program__next;
bpf_program__nth_fd;
bpf_program__pin;
bpf_program__pin_instance;
bpf_program__prev;
bpf_program__priv;
bpf_program__set_expected_attach_type;
bpf_program__set_ifindex;
bpf_program__set_kprobe;
bpf_program__set_perf_event;
bpf_program__set_prep;
bpf_program__set_priv;
bpf_program__set_raw_tracepoint;
bpf_program__set_sched_act;
bpf_program__set_sched_cls;
bpf_program__set_socket_filter;
bpf_program__set_tracepoint;
bpf_program__set_type;
bpf_program__set_xdp;
bpf_program__title;
bpf_program__unload;
bpf_program__unpin;
bpf_program__unpin_instance;
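/* bpf_prog_linfo helpers for looking up bpf_line_info records, used e.g. by bpftool prog dump */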
bpf_prog_linfo__free;
bpf_prog_linfo__new;
bpf_prog_linfo__lfind_addr_func;
bpf_prog_linfo__lfind;
bpf_raw_tracepoint_open;
bpf_set_link_xdp_fd;
bpf_task_fd_query;
bpf_verify_program;
btf__fd;
btf__find_by_name;
btf__free;
btf__get_from_id;
btf__name_by_offset;
btf__new;
btf__resolve_size;
btf__resolve_type;
btf__type_by_id;
libbpf_attach_type_by_name;
libbpf_get_error;
libbpf_prog_type_by_name;
libbpf_set_print;
libbpf_strerror;
local:
*;
};
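/*
 * LIBBPF_0.0.2: feature probes (program types, map types, helpers),
 * BTF deduplication and btf_ext accessors, and the AF_XDP (xsk)
 * socket/umem APIs.
 */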
LIBBPF_0.0.2 {
global:
bpf_probe_helper;
bpf_probe_map_type;
bpf_probe_prog_type;
bpf_map__resize;
bpf_map_lookup_elem_flags;
bpf_object__btf;
bpf_object__find_map_fd_by_name;
bpf_get_link_xdp_id;
btf__dedup;
btf__get_map_kv_tids;
btf__get_nr_types;
btf__get_raw_data;
btf__load;
btf_ext__free;
btf_ext__func_info_rec_size;
btf_ext__get_raw_data;
btf_ext__line_info_rec_size;
btf_ext__new;
btf_ext__reloc_func_info;
btf_ext__reloc_line_info;
xsk_umem__create;
xsk_socket__create;
xsk_umem__delete;
xsk_socket__delete;
xsk_umem__fd;
xsk_socket__fd;
bpf_program__get_prog_info_linear;
bpf_program__bpil_addr_to_offs;
bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;
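/*
 * LIBBPF_0.0.3: support for global .data/.bss/.rodata sections,
 * represented by libbpf as internal single-entry array maps.
 */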
LIBBPF_0.0.3 {
global:
bpf_map__is_internal;
bpf_map_freeze;
btf__finalize_data;
} LIBBPF_0.0.2;
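/*
 * LIBBPF_0.0.4: bpf_link and bpf_program__attach_*() helpers, the BTF
 * dump API, and the perf_buffer API for reading data from
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY maps.  xsk_umem__create is listed
 * again so its updated signature gets a new versioned symbol.
 */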
LIBBPF_0.0.4 {
global:
bpf_link__destroy;
bpf_object__load_xattr;
bpf_program__attach_kprobe;
bpf_program__attach_perf_event;
bpf_program__attach_raw_tracepoint;
bpf_program__attach_tracepoint;
bpf_program__attach_uprobe;
btf_dump__dump_type;
btf_dump__free;
btf_dump__new;
btf__parse_elf;
libbpf_num_possible_cpus;
perf_buffer__free;
perf_buffer__new;
perf_buffer__new_raw;
perf_buffer__poll;
xsk_umem__create;
} LIBBPF_0.0.3;
LIBBPF_0.0.5 {
global:
bpf_btf_get_next_id;
} LIBBPF_0.0.4;
LIBBPF_0.0.6 {
global:
bpf_get_link_xdp_info;
bpf_map__get_pin_path;
bpf_map__is_pinned;
bpf_map__set_pin_path;
bpf_object__open_file;
bpf_object__open_mem;
bpf_program__attach_trace;
bpf_program__get_expected_attach_type;
bpf_program__get_type;
bpf_program__is_tracing;
bpf_program__set_tracing;
bpf_program__size;
btf__find_by_name_kind;
libbpf_find_vmlinux_btf_id;
} LIBBPF_0.0.5;
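/*
 * LIBBPF_0.0.7: struct_ops support, BPF skeleton support, batched map
 * operations, and bpf_link__disconnect(), which lets a destroyed link
 * leave the underlying BPF resource attached.
 */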
LIBBPF_0.0.7 {
global:
btf_dump__emit_type_decl;
bpf_link__disconnect;
bpf_map__attach_struct_ops;
bpf_map_delete_batch;
bpf_map_lookup_and_delete_batch;
bpf_map_lookup_batch;
bpf_map_update_batch;
bpf_object__find_program_by_name;
bpf_object__attach_skeleton;
bpf_object__destroy_skeleton;
bpf_object__detach_skeleton;
bpf_object__load_skeleton;
bpf_object__open_skeleton;
bpf_probe_large_insn_limit;
bpf_prog_attach_xattr;
bpf_program__attach;
bpf_program__name;
bpf_program__is_extension;
bpf_program__is_struct_ops;
bpf_program__set_extension;
bpf_program__set_struct_ops;
btf__align_of;
libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
LIBBPF_0.0.8 {
global:
bpf_link__fd;
bpf_link__open;
bpf_link__pin;
bpf_link__pin_path;
bpf_link__unpin;
bpf_link__update_program;
bpf_link_create;
bpf_link_update;
bpf_map__set_initial_value;
bpf_program__attach_cgroup;
bpf_program__attach_lsm;
bpf_program__is_lsm;
bpf_program__set_attach_target;
bpf_program__set_lsm;
bpf_set_link_xdp_fd_opts;
} LIBBPF_0.0.7;
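/*
 * LIBBPF_0.0.9: bpf_iter and netns attach APIs, BPF stats control, and
 * the ring_buffer consumer API for BPF_MAP_TYPE_RINGBUF maps.
 */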
LIBBPF_0.0.9 {
global:
bpf_enable_stats;
bpf_iter_create;
bpf_link_get_fd_by_id;
bpf_link_get_next_id;
bpf_program__attach_iter;
bpf_program__attach_netns;
perf_buffer__consume;
ring_buffer__add;
ring_buffer__consume;
ring_buffer__free;
ring_buffer__new;
ring_buffer__poll;
} LIBBPF_0.0.8;
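/*
 * LIBBPF_0.1.0: per-attribute getters/setters for map definitions
 * (instead of relying on struct bpf_map_def), program autoload control,
 * the sk_lookup program type, and BTF parsing/pointer-size helpers.
 */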
LIBBPF_0.1.0 {
global:
bpf_link__detach;
bpf_link_detach;
bpf_map__ifindex;
bpf_map__key_size;
bpf_map__map_flags;
bpf_map__max_entries;
bpf_map__numa_node;
bpf_map__set_key_size;
bpf_map__set_map_flags;
bpf_map__set_max_entries;
bpf_map__set_numa_node;
bpf_map__set_type;
bpf_map__set_value_size;
bpf_map__type;
bpf_map__value_size;
bpf_program__attach_xdp;
bpf_program__autoload;
bpf_program__is_sk_lookup;
bpf_program__set_autoload;
bpf_program__set_sk_lookup;
btf__parse;
btf__parse_raw;
btf__pointer_size;
btf__set_fd;
btf__set_pointer_size;
} LIBBPF_0.0.9;
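/*
 * LIBBPF_0.2.0: BTF writing APIs (btf__add_*(), btf__add_str(),
 * btf__find_str()) for constructing BTF programmatically, plus
 * bpf_prog_bind_map() and the freplace/section-name program helpers.
 */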
LIBBPF_0.2.0 {
global:
bpf_prog_bind_map;
bpf_prog_test_run_opts;
bpf_program__attach_freplace;
bpf_program__section_name;
btf__add_array;
btf__add_const;
btf__add_enum;
btf__add_enum_value;
btf__add_datasec;
btf__add_datasec_var_info;
btf__add_field;
btf__add_func;
btf__add_func_param;
btf__add_func_proto;
btf__add_fwd;
btf__add_int;
btf__add_ptr;
btf__add_restrict;
btf__add_str;
libbpf: Add BTF writing APIs Add APIs for appending new BTF types at the end of BTF object. Each BTF kind has either one API of the form btf__add_<kind>(). For types that have variable amount of additional items (struct/union, enum, func_proto, datasec), additional API is provided to emit each such item. E.g., for emitting a struct, one would use the following sequence of API calls: btf__add_struct(...); btf__add_field(...); ... btf__add_field(...); Each btf__add_field() will ensure that the last BTF type is of STRUCT or UNION kind and will automatically increment that type's vlen field. All the strings are provided as C strings (const char *), not a string offset. This significantly improves usability of BTF writer APIs. All such strings will be automatically appended to string section or existing string will be re-used, if such string was already added previously. Each API attempts to do all the reasonable validations, like enforcing non-empty names for entities with required names, proper value bounds, various bit offset restrictions, etc. Type ID validation is minimal because it's possible to emit a type that refers to type that will be emitted later, so libbpf has no way to enforce such cases. User must be careful to properly emit all the necessary types and specify type IDs that will be valid in the finally generated BTF. Each of btf__add_<kind>() APIs return new type ID on success or negative value on error. APIs like btf__add_field() that emit additional items return zero on success and negative value on error. Signed-off-by: Andrii Nakryiko <andriin@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: John Fastabend <john.fastabend@gmail.com> Link: https://lore.kernel.org/bpf/20200929020533.711288-2-andriin@fb.com
2020-09-29 10:05:30 +08:00
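A short sketch of the writer-API call sequence described above, building the equivalent of `struct pair { int x; int y; };`; the type names and sizes are illustrative, not taken from the patch:

#include <bpf/btf.h>
#include <linux/btf.h>   /* BTF_INT_SIGNED */

static int build_pair_struct(struct btf *btf)
{
	int int_id, pair_id, err;

	/* each btf__add_<kind>() returns the new type ID (> 0) or a negative error */
	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (int_id < 0)
		return int_id;

	pair_id = btf__add_struct(btf, "pair", 8);      /* name, size in bytes */
	if (pair_id < 0)
		return pair_id;

	/* btf__add_field() appends to the last added struct/union, returns 0 on success */
	err = btf__add_field(btf, "x", int_id, 0, 0);   /* bit offset 0 */
	if (err)
		return err;
	err = btf__add_field(btf, "y", int_id, 32, 0);  /* bit offset 32 */
	if (err)
		return err;

	return pair_id;
}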
btf__add_struct;
btf__add_typedef;
btf__add_union;
btf__add_var;
btf__add_volatile;
btf__endianness;
btf__find_str;
btf__new_empty;
btf__set_endianness;
btf__str_by_offset;
libbpf: Add perf_buffer APIs for better integration with outside epoll loop Add a set of APIs to the perf_buffer manager to allow applications to integrate perf buffer polling into existing epoll-based infrastructure. One example is applications that already use libevent and want to plug in perf_buffer polling instead of relying on perf_buffer__poll() and wasting an extra thread to do it. But perf_buffer is still extremely useful for setting up and consuming perf buffer rings even in such use cases. So to accommodate such new use cases, add three new APIs: - perf_buffer__buffer_cnt() returns the number of per-CPU buffers maintained by a given instance of the perf_buffer manager; - perf_buffer__buffer_fd() returns the FD of the perf_event corresponding to a specified per-CPU buffer; this FD can then be polled independently; - perf_buffer__consume_buffer() consumes data from a single per-CPU buffer, identified by its slot index. To support a simpler, but less efficient, way to integrate perf_buffer into external polling logic, also expose the underlying epoll FD through the perf_buffer__epoll_fd() API. It needs to be followed by perf_buffer__poll(), wasting an extra syscall, or perf_buffer__consume(), wasting CPU cycles iterating over buffers with no data, but it can be simpler and more convenient in some cases. These APIs allow for great flexibility, but do not sacrifice the general usability of perf_buffer. Also exercise and check the new APIs in the perf_buffer selftest. Signed-off-by: Andrii Nakryiko <andriin@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Reviewed-by: Alan Maguire <alan.maguire@oracle.com> Link: https://lore.kernel.org/bpf/20200821165927.849538-1-andriin@fb.com
2020-08-22 00:59:27 +08:00
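A hedged sketch of plugging the per-buffer FDs into a caller-owned epoll loop, as the commit above describes; it assumes a `struct perf_buffer *pb` and an epoll FD were already created elsewhere (e.g. via perf_buffer__new() and epoll_create1()):

#include <bpf/libbpf.h>
#include <sys/epoll.h>

/* Register every per-CPU buffer FD once, then service one epoll_wait() round. */
static int service_perf_buffer_once(struct perf_buffer *pb, int epoll_fd)
{
	struct epoll_event ev, events[64];
	size_t i, n = perf_buffer__buffer_cnt(pb);
	int nfds, err;

	for (i = 0; i < n; i++) {
		ev.events = EPOLLIN;
		ev.data.u32 = i;                /* remember the buffer's slot index */
		if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD,
			      perf_buffer__buffer_fd(pb, i), &ev) < 0)
			return -1;
	}

	nfds = epoll_wait(epoll_fd, events, 64, -1);
	if (nfds < 0)
		return -1;

	for (i = 0; i < (size_t)nfds; i++) {
		/* consume only the buffers that actually signalled data */
		err = perf_buffer__consume_buffer(pb, events[i].data.u32);
		if (err)
			return err;
	}
	return 0;
}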
perf_buffer__buffer_cnt;
perf_buffer__buffer_fd;
perf_buffer__epoll_fd;
perf_buffer__consume_buffer;
xsk_socket__create_shared;
} LIBBPF_0.1.0;
libbpf: Implement basic split BTF support Support split BTF operation, in which one BTF (the base BTF) provides a basic set of types and strings, while another one (the split BTF) builds on top of the base's types and strings and adds its own new types and strings. From an API standpoint, the fact that the split BTF is built on top of the base BTF is transparent. Type numbering is transparent: if the base BTF's last type ID is #N, then all types in the split BTF start at type ID N+1. Any type in the split BTF can reference base BTF types, but not vice versa. Programmatic construction of a split BTF on top of a base BTF is supported: one can create an empty split BTF with btf__new_empty_split() and pass the base BTF as an input, or pass raw binary data to btf__new_split(), or use the btf__parse_xxx_split() variants to get an initial set of split types/strings from an ELF file with a .BTF section. String offsets are similarly transparent and are a logical continuation of the base BTF's strings. When building BTF programmatically and adding a new string (explicitly with btf__add_str() or implicitly through appending new types/members), the string-to-be-added is first looked up in the base BTF's string section and re-used if it's there. If not, it is looked up in and/or added to the split BTF string section. Similarly to type IDs, types in the split BTF can refer to strings from the base BTF absolutely transparently (but not vice versa, of course, because the base BTF doesn't "know" about the existence of the split BTF). The internal type index is slightly adjusted to be zero-indexed, ignoring the fake [0] VOID type. This allows handling split/base BTF type lookups transparently by using the btf->start_id type ID offset, which is always 1 for base/non-split BTF and equals btf__get_nr_types(base_btf) + 1 for the split BTF. BTF deduplication is not yet supported for split BTF; support for it will be added in a separate patch. Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Song Liu <songliubraving@fb.com> Link: https://lore.kernel.org/bpf/20201105043402.2530976-5-andrii@kernel.org
2020-11-05 12:33:54 +08:00
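An illustrative sketch of the split BTF flow described above; the vmlinux BTF path and the added type are just examples, and error handling is abbreviated:

#include <bpf/btf.h>
#include <linux/btf.h>   /* BTF_INT_SIGNED */
#include <stdio.h>

int main(void)
{
	struct btf *base, *split;
	int id;

	/* base BTF providing the initial set of types and strings */
	base = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	if (!base)
		return 1;

	/* split BTF whose type IDs and string offsets continue after the base */
	split = btf__new_empty_split(base);
	if (!split) {
		btf__free(base);
		return 1;
	}

	/* the first added type lands right after the base BTF's last type ID */
	id = btf__add_int(split, "my_int", 4, BTF_INT_SIGNED);
	printf("split type ID %d, base has %u types\n",
	       id, btf__get_nr_types(base));

	btf__free(split);
	btf__free(base);
	return 0;
}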
LIBBPF_0.3.0 {
global:
btf__base_btf;
btf__parse_elf_split;
btf__parse_raw_split;
btf__parse_split;
btf__new_empty_split;
btf__new_split;
ring_buffer__epoll_fd;
xsk_setup_xdp_prog;
xsk_socket__update_xskmap;
} LIBBPF_0.2.0;
LIBBPF_0.4.0 {
global:
btf__add_float;
btf__add_type;
libbpf: Add BPF static linker APIs Introduce BPF static linker APIs to libbpf. The BPF static linker performs static linking of multiple BPF object files into a single combined resulting object file, preserving all the BPF programs, maps, global variables, etc. Data sections (.bss, .data, .rodata, .maps, maps, etc.) with the same name are concatenated together. Similarly, code sections are also concatenated. All the symbols and ELF relocations are also concatenated in their respective ELF sections and are adjusted accordingly to the new object file layout. Static variables and functions are handled correctly as well, adjusting BPF instruction offsets to reflect the new variable/function offset within the combined ELF section. Such relocations reference STT_SECTION symbols and that stays intact. Data sections in different files can have different alignment requirements, so that is taken care of as well, adjusting sizes and offsets as necessary to satisfy both old and new alignment requirements. DWARF data sections are currently stripped out, as is the LLVM_ADDRSIG section, which is ignored by libbpf in bpf_object__open() anyway. So, in a way, the BPF static linker is an analogue of `llvm-strip -g`, which is a pretty nice property, especially if the resulting .o file is then used to generate a BPF skeleton. Original string sections are ignored and instead we construct our own set of unique strings using the libbpf-internal `struct strset` API. To reduce the size of the patch, all the .BTF and .BTF.ext processing was moved into a separate patch. The high-level API consists of just 4 functions: - bpf_linker__new() creates an instance of the BPF static linker. It accepts an output filename and a (currently empty) options struct; - bpf_linker__add_file() takes an input filename and appends it to the already processed ELF data; it can be called multiple times, once for each BPF ELF object file that needs to be linked in; - bpf_linker__finalize() needs to be called to dump the final ELF contents into the output file specified when the bpf_linker was created; after bpf_linker__finalize() is called, no more bpf_linker__add_file() and bpf_linker__finalize() calls are allowed, they will return an error; - regardless of whether bpf_linker__finalize() was called or not, bpf_linker__free() will free up all the used resources. Currently, the BPF static linker doesn't resolve cross-object-file references (extern variables and/or functions). This will be added in a follow-up patch set. Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Link: https://lore.kernel.org/bpf/20210318194036.3521577-7-andrii@kernel.org
2021-03-19 03:40:30 +08:00
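A minimal sketch of the four-call linker flow described above; the object file names are made up, and the two-argument bpf_linker__add_file() shown here matches the API as introduced in this patch (later libbpf versions add an options argument):

#include <bpf/libbpf.h>

static int link_two_objects(void)
{
	struct bpf_linker *linker;
	int err;

	/* output file name plus (currently empty) options */
	linker = bpf_linker__new("combined.bpf.o", NULL);
	if (!linker)
		return -1;

	err = bpf_linker__add_file(linker, "prog_a.bpf.o");
	if (!err)
		err = bpf_linker__add_file(linker, "prog_b.bpf.o");
	if (!err)
		err = bpf_linker__finalize(linker);   /* writes combined.bpf.o */

	/* always free, whether or not finalize succeeded */
	bpf_linker__free(linker);
	return err;
}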
bpf_linker__add_file;
bpf_linker__finalize;
bpf_linker__free;
bpf_linker__new;
bpf_map__inner_map;
bpf_object__set_kversion;
libbpf: Add low level TC-BPF management API This adds functions that wrap the netlink API used for adding, manipulating, and removing traffic control filters. The API summary: A bpf_tc_hook represents a location where a TC-BPF filter can be attached. This means that creating a hook leads to creation of the backing qdisc, while destruction either removes all filters attached to a hook, or destroys the qdisc if requested explicitly (as discussed below). The TC-BPF API functions operate on this bpf_tc_hook to attach, replace, query, and detach tc filters. All functions return 0 on success, and a negative error code on failure. bpf_tc_hook_create - Create a hook Parameters: @hook - Cannot be NULL, ifindex > 0, attach_point must be set to the proper enum constant. Note that parent must be unset when attach_point is one of BPF_TC_INGRESS or BPF_TC_EGRESS. Note that as an exception BPF_TC_INGRESS|BPF_TC_EGRESS is also a valid value for attach_point. Returns -EOPNOTSUPP when the hook has attach_point set to BPF_TC_CUSTOM. bpf_tc_hook_destroy - Destroy a hook Parameters: @hook - Cannot be NULL. The behaviour depends on the value of attach_point. If BPF_TC_INGRESS, all filters attached to the ingress hook will be detached. If BPF_TC_EGRESS, all filters attached to the egress hook will be detached. If BPF_TC_INGRESS|BPF_TC_EGRESS, the clsact qdisc will be deleted, also detaching all filters. As before, parent must be unset for these attach_points, and set for BPF_TC_CUSTOM. It is advised that if the qdisc is operated on by many programs, the program should at least check that there are no other existing filters before deleting the clsact qdisc. An example is shown below: DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = if_nametoindex("lo"), .attach_point = BPF_TC_INGRESS); /* set opts as NULL, as we're not really interested in * getting any info for a particular filter, but just * detecting its presence. */ r = bpf_tc_query(&hook, NULL); if (r == -ENOENT) { /* no filters */ hook.attach_point = BPF_TC_INGRESS|BPF_TC_EGRESS; return bpf_tc_hook_destroy(&hook); } else { /* failed or r == 0, the latter means filters do exist */ return r; } Note that there is a small race between checking for no filters and deleting the qdisc. This is currently unavoidable. Returns -EOPNOTSUPP when the hook has attach_point set to BPF_TC_CUSTOM. bpf_tc_attach - Attach a filter to a hook Parameters: @hook - Cannot be NULL. Represents the hook the filter will be attached to. Requirements for ifindex and attach_point are the same as described in bpf_tc_hook_create, but BPF_TC_CUSTOM is also supported. In that case, parent must be set to the handle where the filter will be attached (using BPF_TC_PARENT). E.g. to set parent to 1:16 as on the tc command line, the equivalent would be BPF_TC_PARENT(1, 16). @opts - Cannot be NULL. The following opts are optional: * handle - The handle of the filter * priority - The priority of the filter Must be >= 0 and <= UINT16_MAX Note that when left unset, they will be auto-allocated by the kernel. The following opts must be set: * prog_fd - The fd of the loaded SCHED_CLS prog The following opts must be unset: * prog_id - The ID of the BPF prog The following opts are optional: * flags - Currently only BPF_TC_F_REPLACE is allowed. It allows replacing an existing filter instead of failing with -EEXIST.
The following opts will be filled by bpf_tc_attach on a successful attach operation if they are unset: * handle - The handle of the attached filter * priority - The priority of the attached filter * prog_id - The ID of the attached SCHED_CLS prog This way, the user can know what the auto-allocated values of optional opts like handle and priority are for the newly attached filter, if they were unset. Note that some other attributes are set to fixed default values listed below (this holds for all bpf_tc_* APIs): protocol as ETH_P_ALL, direct action mode, chain index of 0, and class ID of 0 (this can be set by writing to the skb->tc_classid field from the BPF program). bpf_tc_detach Parameters: @hook - Cannot be NULL. Represents the hook the filter will be detached from. Requirements are the same as described above in bpf_tc_attach. @opts - Cannot be NULL. The following opts must be set: * handle, priority The following opts must be unset: * prog_fd, prog_id, flags bpf_tc_query Parameters: @hook - Cannot be NULL. Represents the hook where the filter lookup will be performed. Requirements are the same as described above in bpf_tc_attach(). @opts - Cannot be NULL. The following opts must be set: * handle, priority The following opts must be unset: * prog_fd, prog_id, flags The following fields will be filled by bpf_tc_query upon a successful lookup: * prog_id Some usage examples (using BPF skeleton infrastructure): BPF program (test_tc_bpf.c): #include <linux/bpf.h> #include <bpf/bpf_helpers.h> SEC("classifier") int cls(struct __sk_buff *skb) { return 0; } Userspace loader: struct test_tc_bpf *skel = NULL; int fd, r; skel = test_tc_bpf__open_and_load(); if (!skel) return -ENOMEM; fd = bpf_program__fd(skel->progs.cls); DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = if_nametoindex("lo"), .attach_point = BPF_TC_INGRESS); /* Create clsact qdisc */ r = bpf_tc_hook_create(&hook); if (r < 0) goto end; DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = fd); r = bpf_tc_attach(&hook, &opts); if (r < 0) goto end; /* Print the auto-allocated handle and priority */ printf("Handle=%u", opts.handle); printf("Priority=%u", opts.priority); opts.prog_fd = opts.prog_id = 0; bpf_tc_detach(&hook, &opts); end: test_tc_bpf__destroy(skel); This is equivalent to doing the following using the tc command line: # tc qdisc add dev lo clsact # tc filter add dev lo ingress bpf obj foo.o sec classifier da # tc filter del dev lo ingress handle <h> prio <p> bpf ... where the handle and priority can be found using: # tc filter show dev lo ingress Another example replacing a filter (extending the prior example): /* We can also choose both (or one), let's try replacing an * existing filter. */ DECLARE_LIBBPF_OPTS(bpf_tc_opts, replace_opts, .handle = opts.handle, .priority = opts.priority, .prog_fd = fd); r = bpf_tc_attach(&hook, &replace_opts); if (r == -EEXIST) { /* Expected, now use BPF_TC_F_REPLACE to replace it */ replace_opts.flags = BPF_TC_F_REPLACE; return bpf_tc_attach(&hook, &replace_opts); } else if (r < 0) { return r; } /* There must be no existing filter with these * attributes, so cleanup and return an error. */
replace_opts.prog_fd = replace_opts.prog_id = 0; bpf_tc_detach(&hook, &replace_opts); return -1; To obtain info for a particular filter: /* Find info for filter with handle 1 and priority 50 */ DECLARE_LIBBPF_OPTS(bpf_tc_opts, info_opts, .handle = 1, .priority = 50); r = bpf_tc_query(&hook, &info_opts); if (r == -ENOENT) printf("Filter not found"); else if (r < 0) return r; printf("Prog ID: %u", info_opts.prog_id); return 0; Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Co-developed-by: Daniel Borkmann <daniel@iogearbox.net> # libbpf API design [ Daniel: also did major patch cleanup ] Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com> Link: https://lore.kernel.org/bpf/20210512103451.989420-3-memxor@gmail.com
2021-05-13 07:41:22 +08:00
bpf_tc_attach;
bpf_tc_detach;
bpf_tc_hook_create;
bpf_tc_hook_destroy;
bpf_tc_query;
} LIBBPF_0.3.0;