libbpf: Support disabling auto-loading BPF programs

Currently, bpf_object__load() (and, by extension, the skeleton's load step) will
always attempt to prepare, relocate, and load into the kernel every single BPF
program found inside the BPF object file. This is often convenient, is usually
the right thing to do, and is what users expect.

But there are plenty of cases (especially with BPF development constantly
picking up the pace) where a BPF application is intended to work with old
kernels, which offer a potentially reduced set of features, while still taking
full advantage of newer kernels by employing extra BPF programs there. This
could be a choice of fentry/fexit over kprobe/kretprobe when the kernel is
recent enough and built with BTF. Or a BPF program might provide an optimized
bpf_iter-based solution that user space wants to use whenever it is available.
And so on.

With libbpf and BPF CO-RE in particular, it's advantageous not to have to
maintain two separate BPF object files to achieve this. So, to enable such use
cases, this patch adds the ability to opt chosen BPF programs out of
auto-loading. For such programs, libbpf won't attempt to perform relocations
(which might fail due to an old kernel), won't try to resolve BTF types for
BTF-aware program types (tp_btf/fentry/fexit/etc.), because BTF might not be
present, and so on. The skeleton will also automatically skip the
auto-attachment step for such non-loaded BPF programs.
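
For illustration, a minimal sketch of the intended usage with the plain
bpf_object API; the object file name ("my_app.bpf.o"), the program name
("handle_exec_fentry"), and the caller-supplied kernel_has_btf flag are
hypothetical, not part of this patch:

    #include <stdbool.h>
    #include <bpf/libbpf.h>

    /* Sketch only: object and program names are made up for this example. */
    static int load_app(bool kernel_has_btf)
    {
        struct bpf_object *obj;
        struct bpf_program *prog;
        int err;

        obj = bpf_object__open("my_app.bpf.o");
        if (libbpf_get_error(obj))
            return -1;

        /* Opt the BTF-dependent program out of loading on older kernels. */
        prog = bpf_object__find_program_by_name(obj, "handle_exec_fentry");
        if (prog && !kernel_has_btf)
            bpf_program__set_autoload(prog, false);

        /* bpf_object__load() will skip programs with auto-load disabled. */
        err = bpf_object__load(obj);
        if (err)
            bpf_object__close(obj);
        return err;
    }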

Overall, this feature simplifies the development and deployment of real-world
BPF applications with complicated compatibility requirements.
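
As a sketch of how this might look with a generated skeleton: the skeleton name
(my_app), the program names, and the have_fentry probe are hypothetical; only
bpf_program__set_autoload() is the API added by this patch:

    #include <stdbool.h>
    #include <bpf/libbpf.h>
    #include "my_app.skel.h"  /* hypothetical bpftool-generated skeleton */

    /* Prefer the fentry-based program when supported, else the kprobe one. */
    static int run(bool have_fentry)
    {
        struct my_app *skel;
        int err;

        skel = my_app__open();
        if (!skel)
            return -1;

        /* Exactly one of the two variants will be loaded and auto-attached. */
        bpf_program__set_autoload(skel->progs.handle_exec_fentry, have_fentry);
        bpf_program__set_autoload(skel->progs.handle_exec_kprobe, !have_fentry);

        err = my_app__load(skel);
        if (!err)
            err = my_app__attach(skel); /* skips programs that were not loaded */
        if (err)
            my_app__destroy(skel);
        return err;
    }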

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200625232629.3444003-2-andriin@fb.com
Author: Andrii Nakryiko <andriin@fb.com>, 2020-06-25 16:26:28 -07:00
Committer: Alexei Starovoitov <ast@kernel.org>
Commit: d929758101 (parent 16d37ee3d2)
3 files changed, 44 insertions(+), 8 deletions(-)

--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c

@@ -230,6 +230,7 @@ struct bpf_program {
 	struct bpf_insn *insns;
 	size_t insns_cnt, main_prog_cnt;
 	enum bpf_prog_type type;
+	bool load;
 
 	struct reloc_desc *reloc_desc;
 	int nr_reloc;
@@ -541,6 +542,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
 	prog->instances.fds = NULL;
 	prog->instances.nr = -1;
 	prog->type = BPF_PROG_TYPE_UNSPEC;
+	prog->load = true;
 
 	return 0;
 errout:
@@ -2513,6 +2515,8 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
 		need_vmlinux_btf = true;
 
 	bpf_object__for_each_program(prog, obj) {
+		if (!prog->load)
+			continue;
 		if (libbpf_prog_needs_vmlinux_btf(prog)) {
 			need_vmlinux_btf = true;
 			break;
@@ -5445,6 +5449,12 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 {
 	int err = 0, fd, i, btf_id;
 
+	if (prog->obj->loaded) {
+		pr_warn("prog '%s'('%s'): can't load after object was loaded\n",
+			prog->name, prog->section_name);
+		return -EINVAL;
+	}
+
 	if ((prog->type == BPF_PROG_TYPE_TRACING ||
 	     prog->type == BPF_PROG_TYPE_LSM ||
 	     prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
@@ -5533,16 +5543,21 @@ static bool bpf_program__is_function_storage(const struct bpf_program *prog,
 static int
 bpf_object__load_progs(struct bpf_object *obj, int log_level)
 {
+	struct bpf_program *prog;
 	size_t i;
 	int err;
 
 	for (i = 0; i < obj->nr_programs; i++) {
-		if (bpf_program__is_function_storage(&obj->programs[i], obj))
+		prog = &obj->programs[i];
+		if (bpf_program__is_function_storage(prog, obj))
 			continue;
-		obj->programs[i].log_level |= log_level;
-		err = bpf_program__load(&obj->programs[i],
-					obj->license,
-					obj->kern_version);
+		if (!prog->load) {
+			pr_debug("prog '%s'('%s'): skipped loading\n",
+				 prog->name, prog->section_name);
+			continue;
+		}
+		prog->log_level |= log_level;
+		err = bpf_program__load(prog, obj->license, obj->kern_version);
 		if (err)
 			return err;
 	}
@@ -5869,12 +5884,10 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 		return -EINVAL;
 
 	if (obj->loaded) {
-		pr_warn("object should not be loaded twice\n");
+		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
 		return -EINVAL;
 	}
 
-	obj->loaded = true;
-
 	err = bpf_object__probe_loading(obj);
 	err = err ? : bpf_object__probe_caps(obj);
 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
@@ -5889,6 +5902,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	btf__free(obj->btf_vmlinux);
 	obj->btf_vmlinux = NULL;
 
+	obj->loaded = true; /* doesn't matter if successfully or not */
+
 	if (err)
 		goto out;
@@ -6661,6 +6676,20 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
 	return title;
 }
 
+bool bpf_program__autoload(const struct bpf_program *prog)
+{
+	return prog->load;
+}
+
+int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
+{
+	if (prog->obj->loaded)
+		return -EINVAL;
+
+	prog->load = autoload;
+	return 0;
+}
+
 int bpf_program__fd(const struct bpf_program *prog)
 {
 	return bpf_program__nth_fd(prog, 0);
@@ -9283,6 +9312,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
 		const struct bpf_sec_def *sec_def;
 		const char *sec_name = bpf_program__title(prog, false);
 
+		if (!prog->load)
+			continue;
+
 		sec_def = find_sec_def(sec_name);
 		if (!sec_def || !sec_def->attach_fn)
 			continue;

--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h

@@ -200,6 +200,8 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
 LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
 LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
 					  bool needs_copy);
+LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
 
 /* returns program size in bytes */
 LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);

--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map

@@ -286,4 +286,6 @@ LIBBPF_0.1.0 {
 		bpf_map__set_value_size;
 		bpf_map__type;
 		bpf_map__value_size;
+		bpf_program__autoload;
+		bpf_program__set_autoload;
 } LIBBPF_0.0.9;