bpftool: Restore support for BPF offload-enabled feature probing

Commit 1a56c18e6c ("bpftool: Stop supporting BPF offload-enabled
feature probing") removed support for probing BPF offload features.
This is still useful for the NFP NIC, which supports offloading of BPF
programs.

Support was dropped because libbpf, starting with v1.0, no longer
accepts an ifindex in its BPF prog/map/helper feature probing APIs. In
order to keep this useful feature for NFP, restore the functionality by
moving it directly into bpftool.

The restored code is a simplified version of the code that existed in
libbpf and supported passing an ifindex. The simplification is that it
only handles the cases where an ifindex is given, and calls into libbpf
for the cases where it is not.
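
To make that dispatch concrete, here is a minimal sketch, not the code
added by this patch: sketch_probe_prog_type() is a made-up name and the
two-instruction program is only illustrative. When an ifindex is given,
a trivial program is loaded with prog_ifindex set so the kernel hands it
to the device driver; otherwise it defers to libbpf's generic probe.

  #include <stdbool.h>
  #include <unistd.h>
  #include <linux/bpf.h>
  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>

  /* Sketch only: probe prog_type against an offload device when ifindex is
   * non-zero, otherwise fall back to libbpf's generic feature probe.
   */
  static bool sketch_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
  {
  	/* "r0 = 2; exit;" -- return a non-zero value, since (as noted in the
  	 * diff below) nfp rejects exit(0) for TC offload.
  	 */
  	struct bpf_insn insns[2] = {
  		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 2 },
  		{ .code = BPF_JMP | BPF_EXIT },
  	};
  	LIBBPF_OPTS(bpf_prog_load_opts, opts, .prog_ifindex = ifindex);
  	int fd;

  	if (!ifindex)
  		return libbpf_probe_bpf_prog_type(prog_type, NULL) > 0;

  	/* Offload path: the kernel asks the driver whether it can offload. */
  	fd = bpf_prog_load(prog_type, NULL, "GPL", insns, 2, &opts);
  	if (fd >= 0)
  		close(fd);
  	return fd >= 0;
  }

The patch below follows this shape, but additionally restricts the
offload path to the program and map types the NFP can offload, and greps
the verifier log when probing helpers.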

Before restoring support for probing offload features:

  # bpftool feature probe dev ens4np0
  Scanning system call availability...
  bpf() syscall is available

  Scanning eBPF program types...

  Scanning eBPF map types...

  Scanning eBPF helper functions...
  eBPF helpers supported for program type sched_cls:
  eBPF helpers supported for program type xdp:

  Scanning miscellaneous eBPF features...
  Large program size limit is NOT available
  Bounded loop support is NOT available
  ISA extension v2 is NOT available
  ISA extension v3 is NOT available

With support for probing offload features restored:

  # bpftool feature probe dev ens4np0
  Scanning system call availability...
  bpf() syscall is available

  Scanning eBPF program types...
  eBPF program_type sched_cls is available
  eBPF program_type xdp is available

  Scanning eBPF map types...
  eBPF map_type hash is available
  eBPF map_type array is available

  Scanning eBPF helper functions...
  eBPF helpers supported for program type sched_cls:
  	- bpf_map_lookup_elem
  	- bpf_get_prandom_u32
  	- bpf_perf_event_output
  eBPF helpers supported for program type xdp:
  	- bpf_map_lookup_elem
  	- bpf_get_prandom_u32
  	- bpf_perf_event_output
  	- bpf_xdp_adjust_head
  	- bpf_xdp_adjust_tail

  Scanning miscellaneous eBPF features...
  Large program size limit is NOT available
  Bounded loop support is NOT available
  ISA extension v2 is NOT available
  ISA extension v3 is NOT available

Signed-off-by: Niklas Söderlund <niklas.soderlund@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Quentin Monnet <quentin@isovalent.com>
Link: https://lore.kernel.org/bpf/20220310121846.921256-1-niklas.soderlund@corigine.com

--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -3,6 +3,7 @@
 
 #include <ctype.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <string.h>
 #include <unistd.h>
 #include <net/if.h>
@@ -45,6 +46,11 @@ static bool run_as_unprivileged;
 
 /* Miscellaneous utility functions */
 
+static bool grep(const char *buffer, const char *pattern)
+{
+	return !!strstr(buffer, pattern);
+}
+
 static bool check_procfs(void)
 {
 	struct statfs st_fs;
@@ -135,6 +141,32 @@ static void print_end_section(void)
 
 /* Probing functions */
 
+static int get_vendor_id(int ifindex)
+{
+	char ifname[IF_NAMESIZE], path[64], buf[8];
+	ssize_t len;
+	int fd;
+
+	if (!if_indextoname(ifindex, ifname))
+		return -1;
+
+	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+	fd = open(path, O_RDONLY | O_CLOEXEC);
+	if (fd < 0)
+		return -1;
+
+	len = read(fd, buf, sizeof(buf));
+	close(fd);
+	if (len < 0)
+		return -1;
+	if (len >= (ssize_t)sizeof(buf))
+		return -1;
+	buf[len] = '\0';
+
+	return strtol(buf, NULL, 0);
+}
+
 static int read_procfs(const char *path)
 {
 	char *endptr, *line = NULL;
@@ -478,6 +510,40 @@ static bool probe_bpf_syscall(const char *define_prefix)
 	return res;
 }
 
+static bool
+probe_prog_load_ifindex(enum bpf_prog_type prog_type,
+			const struct bpf_insn *insns, size_t insns_cnt,
+			char *log_buf, size_t log_buf_sz,
+			__u32 ifindex)
+{
+	LIBBPF_OPTS(bpf_prog_load_opts, opts,
+		    .log_buf = log_buf,
+		    .log_size = log_buf_sz,
+		    .log_level = log_buf ? 1 : 0,
+		    .prog_ifindex = ifindex,
+	);
+	int fd;
+
+	errno = 0;
+	fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
+	if (fd >= 0)
+		close(fd);
+
+	return fd >= 0 && errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+	/* nfp returns -EINVAL on exit(0) with TC offload */
+	struct bpf_insn insns[2] = {
+		BPF_MOV64_IMM(BPF_REG_0, 2),
+		BPF_EXIT_INSN()
+	};
+
+	return probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns),
+				       NULL, 0, ifindex);
+}
+
 static void
 probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
 		const char *define_prefix, __u32 ifindex)
@@ -488,11 +554,19 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
 	bool res;
 
 	if (ifindex) {
-		p_info("BPF offload feature probing is not supported");
-		return;
+		switch (prog_type) {
+		case BPF_PROG_TYPE_SCHED_CLS:
+		case BPF_PROG_TYPE_XDP:
+			break;
+		default:
+			return;
+		}
+
+		res = probe_prog_type_ifindex(prog_type, ifindex);
+	} else {
+		res = libbpf_probe_bpf_prog_type(prog_type, NULL);
 	}
 
-	res = libbpf_probe_bpf_prog_type(prog_type, NULL);
 #ifdef USE_LIBCAP
 	/* Probe may succeed even if program load fails, for unprivileged users
 	 * check that we did not fail because of insufficient permissions
@@ -521,6 +595,26 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
 			   define_prefix);
 }
 
+static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex)
+{
+	LIBBPF_OPTS(bpf_map_create_opts, opts);
+	int key_size, value_size, max_entries;
+	int fd;
+
+	opts.map_ifindex = ifindex;
+
+	key_size = sizeof(__u32);
+	value_size = sizeof(__u32);
+	max_entries = 1;
+
+	fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries,
+			    &opts);
+	if (fd >= 0)
+		close(fd);
+
+	return fd >= 0;
+}
+
 static void
 probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
 	       __u32 ifindex)
@@ -531,11 +625,18 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
 	bool res;
 
 	if (ifindex) {
-		p_info("BPF offload feature probing is not supported");
-		return;
-	}
+		switch (map_type) {
+		case BPF_MAP_TYPE_HASH:
+		case BPF_MAP_TYPE_ARRAY:
+			break;
+		default:
+			return;
+		}
 
-	res = libbpf_probe_bpf_map_type(map_type, NULL);
+		res = probe_map_type_ifindex(map_type, ifindex);
+	} else {
+		res = libbpf_probe_bpf_map_type(map_type, NULL);
+	}
 
 	/* Probe result depends on the success of map creation, no additional
 	 * check required for unprivileged users
@@ -559,6 +660,33 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
 			   define_prefix);
 }
 
+static bool
+probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type,
+		     __u32 ifindex)
+{
+	struct bpf_insn insns[2] = {
+		BPF_EMIT_CALL(id),
+		BPF_EXIT_INSN()
+	};
+	char buf[4096] = {};
+	bool res;
+
+	probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), buf,
+				sizeof(buf), ifindex);
+	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+	switch (get_vendor_id(ifindex)) {
+	case 0x19ee: /* Netronome specific */
+		res = res && !grep(buf, "not supported by FW") &&
+			     !grep(buf, "unsupported function id");
+		break;
+	default:
+		break;
+	}
+
+	return res;
+}
+
 static void
 probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
 			  const char *define_prefix, unsigned int id,
@@ -567,12 +695,10 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
 	bool res = false;
 
 	if (supported_type) {
-		if (ifindex) {
-			p_info("BPF offload feature probing is not supported");
-			return;
-		}
-
-		res = libbpf_probe_bpf_helper(prog_type, id, NULL);
+		if (ifindex)
+			res = probe_helper_ifindex(id, prog_type, ifindex);
+		else
+			res = libbpf_probe_bpf_helper(prog_type, id, NULL);
 #ifdef USE_LIBCAP
 		/* Probe may succeed even if program load fails, for
 		 * unprivileged users check that we did not fail because of