Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2020-08-15

The following pull-request contains BPF updates for your *net* tree.

We've added 23 non-merge commits during the last 4 day(s) which contain
a total of 32 files changed, 421 insertions(+), 141 deletions(-).

The main changes are:

1) Fix sock_ops ctx access splat due to register override, from John Fastabend.

2) Batch of various fixes to libbpf, bpftool, and selftests when building and
   testing in 32-bit mode, from Andrii Nakryiko.

3) Fix vmlinux.h generation on ARM by mapping GCC built-in types (__Poly*_t)
   to equivalent ones clang can work with, from Jean-Philippe Brucker.

4) Fix build_id lookup in the bpf_get_stackid() helper by walking all NOTE ELF
   sections instead of just the first one, from Jiri Olsa.

5) Avoid use of __builtin_offsetof() in libbpf for CO-RE, from Yonghong Song.

6) Fix segfault in test_mmap due to inconsistent length params, from Jianlin Lv.

7) Don't override errno in libbpf when logging errors, from Toke Høiland-Jørgensen.

8) Fix v4_to_v6 sockaddr conversion in sk_lookup test, from Stanislav Fomichev.

9) Add link to bpf-helpers(7) man page to BPF doc, from Joe Stringer.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2020-08-14 17:12:23 -07:00
commit 10a3b7c1c3
32 changed files with 423 additions and 143 deletions

View File

@ -36,6 +36,12 @@ Two sets of Questions and Answers (Q&A) are maintained.
bpf_devel_QA
Helper functions
================
* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs.
Program types
=============
@ -79,4 +85,5 @@ Other
.. _networking-filter: ../networking/filter.rst
.. _man-pages: https://www.kernel.org/doc/man-pages/
.. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html
.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html
.. _BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/

View File

@ -213,11 +213,13 @@ static int stack_map_get_build_id_32(void *page_addr,
phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
for (i = 0; i < ehdr->e_phnum; ++i)
if (phdr[i].p_type == PT_NOTE)
return stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz);
for (i = 0; i < ehdr->e_phnum; ++i) {
if (phdr[i].p_type == PT_NOTE &&
!stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz))
return 0;
}
return -EINVAL;
}
@ -236,11 +238,13 @@ static int stack_map_get_build_id_64(void *page_addr,
phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
for (i = 0; i < ehdr->e_phnum; ++i)
if (phdr[i].p_type == PT_NOTE)
return stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz);
for (i = 0; i < ehdr->e_phnum; ++i) {
if (phdr[i].p_type == PT_NOTE &&
!stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz))
return 0;
}
return -EINVAL;
}
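
The loop above keeps scanning after the first PT_NOTE entry, since a binary can carry several note segments (for instance .note.gnu.property in one and .note.gnu.build-id in another) and the build-id is not guaranteed to live in the first one. As a standalone illustration (not part of the patch), the sketch below lists every PT_NOTE program header of a 64-bit ELF using the same header fields the kernel code walks:

/* Illustration only: print offset and size of each PT_NOTE program
 * header in a 64-bit ELF; error handling is kept minimal. */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	void *map;
	int i, fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st))
		return 1;
	map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	Elf64_Ehdr *ehdr = map;
	Elf64_Phdr *phdr = (Elf64_Phdr *)((char *)map + ehdr->e_phoff);

	for (i = 0; i < ehdr->e_phnum; i++)
		if (phdr[i].p_type == PT_NOTE)
			printf("PT_NOTE #%d: offset %#llx size %#llx\n", i,
			       (unsigned long long)phdr[i].p_offset,
			       (unsigned long long)phdr[i].p_filesz);
	return 0;
}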

View File

@ -8913,10 +8913,6 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
NL_SET_ERR_MSG(extack, "Active program does not match expected");
return -EEXIST;
}
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
NL_SET_ERR_MSG(extack, "XDP program already attached");
return -EBUSY;
}
/* put effective new program into new_prog */
if (link)
@ -8927,6 +8923,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
? XDP_MODE_DRV : XDP_MODE_SKB;
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
NL_SET_ERR_MSG(extack, "XDP program already attached");
return -EBUSY;
}
if (!offload && dev_xdp_prog(dev, other_mode)) {
NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
return -EEXIST;

View File

@ -8317,15 +8317,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
do { \
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
fullsock_reg = reg; \
jmp += 2; \
} \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, \
is_fullsock), \
si->dst_reg, si->src_reg, \
fullsock_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
is_fullsock)); \
*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
if (si->dst_reg == si->src_reg) \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, sk),\
si->dst_reg, si->src_reg, \
@ -8334,6 +8350,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
OBJ_FIELD), \
si->dst_reg, si->dst_reg, \
offsetof(OBJ, OBJ_FIELD)); \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_JMP_A(1); \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
} \
} while (0)
#define SOCK_OPS_GET_SK() \
do { \
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
fullsock_reg = reg; \
jmp += 2; \
} \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, \
is_fullsock), \
fullsock_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
is_fullsock)); \
*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
if (si->dst_reg == si->src_reg) \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, sk),\
si->dst_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, sk));\
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_JMP_A(1); \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
} \
} while (0)
#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
@ -8620,17 +8679,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
break;
case offsetof(struct bpf_sock_ops, sk):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct bpf_sock_ops_kern,
is_fullsock),
si->dst_reg, si->src_reg,
offsetof(struct bpf_sock_ops_kern,
is_fullsock));
*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
struct bpf_sock_ops_kern, sk),
si->dst_reg, si->src_reg,
offsetof(struct bpf_sock_ops_kern, sk));
SOCK_OPS_GET_SK();
break;
}
return insn - insn_buf;
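
In short, the reworked SOCK_OPS_GET_FIELD()/SOCK_OPS_GET_SK() macros pick a scratch register (BPF_REG_9, stepping down on collisions with src/dst) and, when dst_reg equals src_reg, spill it to bpf_sock_ops_kern->temp so the is_fullsock test no longer clobbers the context pointer before the real load. A hand-written trace (not verifier output) of what SOCK_OPS_GET_SK() emits for the dst_reg == src_reg == r1 case with r9 free as scratch (jmp == 3):

/*
 *   *(u64 *)(r1 + temp) = r9     // save scratch into ctx->temp
 *   r9 = *(r1 + is_fullsock)     // fullsock flag, width per field size
 *   if r9 == 0 goto +3           // not a full socket: skip load and goto
 *   r9 = *(u64 *)(r1 + temp)     // restore scratch while r1 is still ctx
 *   r1 = *(u64 *)(r1 + sk)       // overwrite dst only as the last step
 *   goto +1                      // skip the second restore
 *   r9 = *(u64 *)(r1 + temp)     // restore scratch on the !fullsock path
 */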

View File

@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
if (!info->btf_id || !info->nr_func_info ||
btf__get_from_id(info->btf_id, &prog_btf))
goto print;
finfo = (struct bpf_func_info *)info->func_info;
finfo = u64_to_ptr(info->func_info);
func_type = btf__type_by_id(prog_btf, finfo->type_id);
if (!func_type || !btf_is_func(func_type))
goto print;

View File

@ -143,6 +143,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
var_name, align);
return -EINVAL;
}
/* Assume 32-bit architectures when generating data section
* struct memory layout. Given bpftool can't know which target
* host architecture it's emitting skeleton for, we need to be
* conservative and assume 32-bit one to ensure enough padding
* bytes are generated for pointer and long types. This will
* still work correctly for 64-bit architectures, because in
* the worst case we'll generate unnecessary padding field,
* which on 64-bit architectures is not strictly necessary and
* would be handled by natural 8-byte alignment. But it still
* will be a correct memory layout, based on recorded offsets
* in BTF.
*/
if (align > 4)
align = 4;
align_off = (off + align - 1) / align * align;
if (align_off != need_off) {
@ -397,7 +411,7 @@ static int do_skeleton(int argc, char **argv)
{ \n\
struct %1$s *obj; \n\
\n\
obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
if (!obj) \n\
return NULL; \n\
if (%1$s__create_skeleton(obj)) \n\
@ -461,7 +475,7 @@ static int do_skeleton(int argc, char **argv)
{ \n\
struct bpf_object_skeleton *s; \n\
\n\
s = (typeof(s))calloc(1, sizeof(*s)); \n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
if (!s) \n\
return -1; \n\
obj->skeleton = s; \n\
@ -479,7 +493,7 @@ static int do_skeleton(int argc, char **argv)
/* maps */ \n\
s->map_cnt = %zu; \n\
s->map_skel_sz = sizeof(*s->maps); \n\
s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
if (!s->maps) \n\
goto err; \n\
",
@ -515,7 +529,7 @@ static int do_skeleton(int argc, char **argv)
/* programs */ \n\
s->prog_cnt = %zu; \n\
s->prog_skel_sz = sizeof(*s->progs); \n\
s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
if (!s->progs) \n\
goto err; \n\
",

View File

@ -106,7 +106,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "tp_name",
(const char *)info->raw_tracepoint.tp_name);
u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
@ -185,7 +185,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
printf("\n\ttp '%s' ",
(const char *)info->raw_tracepoint.tp_name);
(const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);

View File

@ -21,7 +21,15 @@
/* Make sure we do not use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64)(unsigned long)ptr;
}
static inline void *u64_to_ptr(__u64 ptr)
{
return (void *)(unsigned long)ptr;
}
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })

View File

@ -428,14 +428,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
p_info("no instructions returned");
return -1;
}
buf = (unsigned char *)(info->jited_prog_insns);
buf = u64_to_ptr(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
buf = (unsigned char *)info->xlated_prog_insns;
buf = u64_to_ptr(info->xlated_prog_insns);
member_len = info->xlated_prog_len;
}
@ -444,7 +444,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
return -1;
}
func_info = (void *)info->func_info;
func_info = u64_to_ptr(info->func_info);
if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
@ -462,7 +462,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
n = write(fd, buf, member_len);
close(fd);
if (n != member_len) {
if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
return -1;
@ -492,13 +492,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
__u32 i;
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
ksyms = (__u64 *) info->jited_ksyms;
ksyms = u64_to_ptr(info->jited_ksyms);
}
if (json_output)
jsonw_start_array(json_wtr);
lens = (__u32 *) info->jited_func_lens;
lens = u64_to_ptr(info->jited_func_lens);
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
@ -559,7 +559,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info->nr_jited_ksyms;
dd.jited_ksyms = (__u64 *) info->jited_ksyms;
dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = info->func_info_rec_size;
@ -1681,7 +1681,7 @@ static char *profile_target_name(int tgt_fd)
goto out;
}
func_info = (struct bpf_func_info *)(info_linear->info.func_info);
func_info = u64_to_ptr(info_linear->info.func_info);
t = btf__type_by_id(btf, func_info[0].type_id);
if (!t) {
p_err("btf %d doesn't have type %d",

View File

@ -40,7 +40,7 @@
* Helper macro to manipulate data structures
*/
#ifndef offsetof
#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member) \

View File

@ -41,6 +41,7 @@ struct btf {
__u32 types_size;
__u32 data_size;
int fd;
int ptr_sz;
};
static inline __u64 ptr_to_u64(const void *ptr)
@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
return btf->types[type_id];
}
static int determine_ptr_size(const struct btf *btf)
{
const struct btf_type *t;
const char *name;
int i;
for (i = 1; i <= btf->nr_types; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_int(t))
continue;
name = btf__name_by_offset(btf, t->name_off);
if (!name)
continue;
if (strcmp(name, "long int") == 0 ||
strcmp(name, "long unsigned int") == 0) {
if (t->size != 4 && t->size != 8)
continue;
return t->size;
}
}
return -1;
}
static size_t btf_ptr_sz(const struct btf *btf)
{
if (!btf->ptr_sz)
((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
}
/* Return pointer size this BTF instance assumes. The size is heuristically
* determined by looking for 'long' or 'unsigned long' integer type and
* recording its size in bytes. If BTF type information doesn't have any such
* type, this function returns 0. In the latter case, native architecture's
* pointer size is assumed, so will be either 4 or 8, depending on
* architecture that libbpf was compiled for. It's possible to override
* guessed value by using btf__set_pointer_size() API.
*/
size_t btf__pointer_size(const struct btf *btf)
{
if (!btf->ptr_sz)
((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
if (btf->ptr_sz < 0)
/* not enough BTF type info to guess */
return 0;
return btf->ptr_sz;
}
/* Override or set pointer size in bytes. Only values of 4 and 8 are
* supported.
*/
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
if (ptr_sz != 4 && ptr_sz != 8)
return -EINVAL;
btf->ptr_sz = ptr_sz;
return 0;
}
static bool btf_type_is_void(const struct btf_type *t)
{
return t == &btf_void || btf_is_fwd(t);
@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
size = t->size;
goto done;
case BTF_KIND_PTR:
size = sizeof(void *);
size = btf_ptr_sz(btf);
goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id)
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
return min(sizeof(void *), (size_t)t->size);
return min(btf_ptr_sz(btf), (size_t)t->size);
case BTF_KIND_PTR:
return sizeof(void *);
return btf_ptr_sz(btf);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
if (IS_ERR(btf))
goto done;
switch (gelf_getclass(elf)) {
case ELFCLASS32:
btf__set_pointer_size(btf, 4);
break;
case ELFCLASS64:
btf__set_pointer_size(btf, 8);
break;
default:
pr_warn("failed to get ELF class (bitness) for %s\n", path);
break;
}
if (btf_ext && btf_ext_data) {
*btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size);

View File

@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
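
A hedged usage sketch for the two APIs declared above (the object path and the fallback policy are ours, not libbpf's): parse BTF from an ELF and pin the pointer size to 8 when the heuristic cannot infer it.

#include <stdio.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

/* Sketch: make sure a parsed BTF has a known pointer size before it is
 * used for size/alignment calculations or for dumping types. */
int main(void)
{
	struct btf *btf = btf__parse_elf("prog.bpf.o", NULL); /* example path */

	if (!btf || libbpf_get_error(btf))
		return 1;

	if (btf__pointer_size(btf) == 0)	/* heuristic found no 'long' */
		btf__set_pointer_size(btf, 8);	/* BPF target is 64-bit */

	printf("pointer size: %zu\n", btf__pointer_size(btf));
	btf__free(btf);
	return 0;
}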

View File

@ -13,6 +13,7 @@
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <linux/kernel.h>
#include "btf.h"
#include "hashmap.h"
#include "libbpf.h"
@ -60,6 +61,7 @@ struct btf_dump {
const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
struct btf_dump_opts opts;
int ptr_sz;
bool strip_mods;
/* per-type auxiliary state */
@ -138,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
d->btf_ext = btf_ext;
d->printf_fn = printf_fn;
d->opts.ctx = opts ? opts->ctx : NULL;
d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (IS_ERR(d->type_names)) {
@ -549,6 +552,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
}
}
static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
@ -671,6 +677,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
switch (kind) {
case BTF_KIND_INT:
/* Emit type alias definitions if necessary */
btf_dump_emit_missing_aliases(d, id, t);
tstate->emit_state = EMITTED;
break;
case BTF_KIND_ENUM:
@ -797,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
int align, int lvl)
{
int off_diff = m_off - cur_off;
int ptr_bits = sizeof(void *) * 8;
int ptr_bits = d->ptr_sz * 8;
if (off_diff <= 0)
/* no gap */
@ -870,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, ": %d", m_sz);
off = m_off + m_sz;
} else {
m_sz = max(0, btf__resolve_size(d->btf, m->type));
m_sz = max(0LL, btf__resolve_size(d->btf, m->type));
off = m_off + m_sz * 8;
}
btf_dump_printf(d, ";");
@ -890,6 +899,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, " __attribute__((packed))");
}
static const char *missing_base_types[][2] = {
/*
* GCC emits typedefs to its internal __PolyX_t types when compiling Arm
* SIMD intrinsics. Alias them to standard base types.
*/
{ "__Poly8_t", "unsigned char" },
{ "__Poly16_t", "unsigned short" },
{ "__Poly64_t", "unsigned long long" },
{ "__Poly128_t", "unsigned __int128" },
};
static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
const char *name = btf_dump_type_name(d, id);
int i;
for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
if (strcmp(name, missing_base_types[i][0]) == 0) {
btf_dump_printf(d, "typedef %s %s;\n\n",
missing_base_types[i][1], name);
break;
}
}
}
static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
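
In generated headers such as vmlinux.h, the effect of the table above is a set of plain typedefs emitted just before the corresponding BTF integer types are used, roughly:

/* Illustrative shape of the emitted aliases; clang can then compile
 * code using GCC's Arm SIMD polynomial types straight from vmlinux.h. */
typedef unsigned char __Poly8_t;
typedef unsigned short __Poly16_t;
typedef unsigned long long __Poly64_t;
typedef unsigned __int128 __Poly128_t;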

View File

@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
BTF_ELF_SEC, err);
goto out;
}
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
err = 0;
}
if (btf_ext_data) {
@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (IS_ERR(kern_btf))
return PTR_ERR(kern_btf);
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
bpf_object__sanitize_btf(obj, kern_btf);
}
@ -3478,10 +3482,11 @@ bpf_object__probe_global_data(struct bpf_object *obj)
map = bpf_create_map_xattr(&map_attr);
if (map < 0) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
ret = -errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
__func__, cp, errno);
return -errno;
__func__, cp, -ret);
return ret;
}
insns[0].imm = map;
@ -5194,7 +5199,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
static int bpf_object__collect_map_relos(struct bpf_object *obj,
GElf_Shdr *shdr, Elf_Data *data)
{
int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
int i, j, nrels, new_sz;
const struct btf_var_secinfo *vi = NULL;
const struct btf_type *sec, *var, *def;
const struct btf_member *member;
@ -5243,7 +5249,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
vi = btf_var_secinfos(sec) + map->btf_var_idx;
if (vi->offset <= rel.r_offset &&
rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
break;
}
if (j == obj->nr_maps) {
@ -5279,17 +5285,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
return -EINVAL;
moff = rel.r_offset - vi->offset - moff;
if (moff % ptr_sz)
/* here we use BPF pointer size, which is always 64 bit, as we
* are parsing ELF that was built for BPF target
*/
if (moff % bpf_ptr_sz)
return -EINVAL;
moff /= ptr_sz;
moff /= bpf_ptr_sz;
if (moff >= map->init_slots_sz) {
new_sz = moff + 1;
tmp = realloc(map->init_slots, new_sz * ptr_sz);
tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
if (!tmp)
return -ENOMEM;
map->init_slots = tmp;
memset(map->init_slots + map->init_slots_sz, 0,
(new_sz - map->init_slots_sz) * ptr_sz);
(new_sz - map->init_slots_sz) * host_ptr_sz);
map->init_slots_sz = new_sz;
}
map->init_slots[moff] = targ_map;
@ -6012,9 +6021,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("failed to pin program: %s\n", cp);
return -errno;
return err;
}
pr_debug("pinned program '%s'\n", path);

View File

@ -295,5 +295,7 @@ LIBBPF_0.1.0 {
bpf_program__set_sk_lookup;
btf__parse;
btf__parse_raw;
btf__pointer_size;
btf__set_fd;
btf__set_pointer_size;
} LIBBPF_0.0.9;

View File

@ -159,15 +159,15 @@ void test_bpf_obj_id(void)
/* Check getting link info */
info_len = sizeof(struct bpf_link_info) * 2;
bzero(&link_infos[i], info_len);
link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
@ -178,7 +178,7 @@ void test_bpf_obj_id(void)
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
(char *)link_infos[i].raw_tracepoint.tp_name,
(const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter"))
goto done;

View File

@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args)
static struct btf_dump_test_case {
const char *name;
const char *file;
bool known_ptr_sz;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
{"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
{"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
{"btf_dump: padding", "btf_dump_test_case_padding", {}},
{"btf_dump: packing", "btf_dump_test_case_packing", {}},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
{"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
{"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
{"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
{"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
{"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
{"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
};
static int btf_dump_all_types(const struct btf *btf,
@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
goto done;
}
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
* do, it should be 8 regardless of host architecture, becaues BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
btf__set_pointer_size(btf, 8);
} else {
CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
8, btf__pointer_size(btf));
}
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {

View File

@ -159,8 +159,8 @@ void test_core_extern(void)
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
CHECK(got[j] != exp[j], "check_res",
"result #%d: expected %lx, but got %lx\n",
j, exp[j], got[j]);
"result #%d: expected %llx, but got %llx\n",
j, (__u64)exp[j], (__u64)got[j]);
}
cleanup:
test_core_extern__destroy(skel);

View File

@ -237,8 +237,8 @@
.union_sz = sizeof(((type *)0)->union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
.arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
.ptr_sz = sizeof(((type *)0)->ptr_field), \
.enum_sz = sizeof(((type *)0)->enum_field), \
.ptr_sz = 8, /* always 8-byte pointer for BPF */ \
.enum_sz = sizeof(((type *)0)->enum_field), \
}
#define SIZE_CASE(name) { \
@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = {
.sb4 = -1,
.sb20 = -0x17654321,
.u32 = 0xBEEF,
.s32 = -0x3FEDCBA987654321,
.s32 = -0x3FEDCBA987654321LL,
}),
BITFIELDS_CASE(bitfields___bitfield_vs_int, {
.ub1 = 0xFEDCBA9876543210,
.ub1 = 0xFEDCBA9876543210LL,
.ub2 = 0xA6,
.ub7 = -0x7EDCBA987654321,
.sb4 = -0x6123456789ABCDE,
.sb20 = 0xD00D,
.ub7 = -0x7EDCBA987654321LL,
.sb4 = -0x6123456789ABCDELL,
.sb20 = 0xD00DLL,
.u32 = -0x76543,
.s32 = 0x0ADEADBEEFBADB0B,
.s32 = 0x0ADEADBEEFBADB0BLL,
}),
BITFIELDS_CASE(bitfields___just_big_enough, {
.ub1 = 0xF,
.ub2 = 0x0812345678FEDCBA,
.ub1 = 0xFLL,
.ub2 = 0x0812345678FEDCBALL,
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),

View File

@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
__u32 duration = 0, retval;
struct bpf_map *data_map;
const int zero = 0;
u64 *result = NULL;
__u64 *result = NULL;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
link = calloc(sizeof(struct bpf_link *), prog_cnt);
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64));
result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
if (CHECK(!link || !prog || !result, "alloc_memory",
"failed to alloc memory"))
goto close_prog;
@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
goto close_prog;
for (i = 0; i < prog_cnt; i++)
if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
result[i]))
goto close_prog;

View File

@ -591,7 +591,7 @@ void test_flow_dissector(void)
CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
err || tattr.retval != 1,
tests[i].name,
"err %d errno %d retval %d duration %d size %u/%lu\n",
"err %d errno %d retval %d duration %d size %u/%zu\n",
err, errno, tattr.retval, tattr.duration,
tattr.data_size_out, sizeof(flow_keys));
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);

View File

@ -5,7 +5,7 @@
static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
uint64_t num;
__u64 num;
map_fd = bpf_find_map(__func__, obj, "result_number");
if (CHECK_FAIL(map_fd < 0))
@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
struct {
char *name;
uint32_t key;
uint64_t num;
__u64 num;
} tests[] = {
{ "relocate .bss reference", 0, 0 },
{ "relocate .data reference", 1, 42 },
@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
"err %d result %lx expected %lx\n",
"err %d result %llx expected %llx\n",
err, num, tests[i].num);
}
}

View File

@ -21,7 +21,7 @@ void test_mmap(void)
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
struct bpf_map *data_map, *bss_map;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
struct test_mmap__bss *bss_data;
struct bpf_map_info map_info;
__u32 map_info_sz = sizeof(map_info);
@ -183,16 +183,23 @@ void test_mmap(void)
/* check some more advanced mmap() manipulations */
/* map all but last page: pages 1-3 mapped */
tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
data_map_fd, 0);
if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
goto cleanup;
/* map all but last page: pages 1-3 mapped */
tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
munmap(tmp0, 4 * page_size);
goto cleanup;
}
/* unmap second page: pages 1, 3 mapped */
err = munmap(tmp1 + page_size, page_size);
if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
munmap(tmp1, map_sz);
munmap(tmp1, 4 * page_size);
goto cleanup;
}
@ -201,7 +208,7 @@ void test_mmap(void)
MAP_SHARED | MAP_FIXED, data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
munmap(tmp1, page_size);
munmap(tmp1 + 2*page_size, page_size);
munmap(tmp1 + 2*page_size, 2 * page_size);
goto cleanup;
}
CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
@ -211,7 +218,7 @@ void test_mmap(void)
tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
munmap(tmp1, 3 * page_size); /* unmap page 1 */
munmap(tmp1, 4 * page_size); /* unmap page 1 */
goto cleanup;
}
CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
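
The reworked test first reserves a four-page region with an anonymous mapping and then places the real mapping inside it with MAP_FIXED, which keeps every later munmap() length consistent with one base address. A minimal standalone sketch of that reservation pattern (independent of BPF):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Sketch: grab an address range up front, then map the real object at a
 * fixed address inside it so MAP_FIXED cannot clobber unrelated
 * mappings; tear the whole range down with a single munmap(). */
int main(void)
{
	long page = sysconf(_SC_PAGE_SIZE);
	void *resv, *fixed;

	resv = mmap(NULL, 4 * page, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (resv == MAP_FAILED)
		return 1;

	/* in the selftest this second mmap() is backed by the BPF array fd */
	fixed = mmap(resv, 3 * page, PROT_READ,
		     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (fixed == MAP_FAILED)
		return 1;

	printf("reserved %p, mapped %p\n", resv, fixed);
	munmap(resv, 4 * page);
	return 0;
}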

View File

@ -28,7 +28,7 @@ void test_prog_run_xattr(void)
"err %d errno %d retval %d\n", err, errno, tattr.retval);
CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
"incorrect output size, want %lu have %u\n",
"incorrect output size, want %zu have %u\n",
sizeof(pkt_v4), tattr.data_size_out);
CHECK_ATTR(buf[5] != 0, "overflow",

View File

@ -309,6 +309,7 @@ static void v4_to_v6(struct sockaddr_storage *ss)
v6->sin6_addr.s6_addr[10] = 0xff;
v6->sin6_addr.s6_addr[11] = 0xff;
memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
memset(&v6->sin6_addr.s6_addr[0], 0, 10);
}
static int udp_recv_send(int server_fd)
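
The added memset() zeroes the ten leading bytes of the address, which previously kept whatever bytes were left over in the reused sockaddr_storage. For reference, a self-contained sketch of building an IPv4-mapped IPv6 address (::ffff:a.b.c.d) from scratch (the helper name is ours, not the selftest's):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Sketch: convert a sockaddr_in into the equivalent IPv4-mapped IPv6
 * sockaddr_in6. Bytes 0-9 of s6_addr must be zero, bytes 10-11 must be
 * 0xff, and bytes 12-15 carry the IPv4 address. */
static void map_v4_to_v6(const struct sockaddr_in *v4, struct sockaddr_in6 *v6)
{
	memset(v6, 0, sizeof(*v6));		/* covers s6_addr[0..9] too */
	v6->sin6_family = AF_INET6;
	v6->sin6_port = v4->sin_port;
	v6->sin6_addr.s6_addr[10] = 0xff;
	v6->sin6_addr.s6_addr[11] = 0xff;
	memcpy(&v6->sin6_addr.s6_addr[12], &v4->sin_addr.s_addr, 4);
}

int main(void)
{
	struct sockaddr_in v4 = { .sin_family = AF_INET };
	struct sockaddr_in6 v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "127.0.0.1", &v4.sin_addr);
	map_v4_to_v6(&v4, &v6);
	printf("%s\n", inet_ntop(AF_INET6, &v6.sin6_addr, buf, sizeof(buf)));
	return 0;
}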

View File

@ -81,7 +81,7 @@ void test_skb_ctx(void)
CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
"ctx_size_out",
"incorrect output size, want %lu have %u\n",
"incorrect output size, want %zu have %u\n",
sizeof(skb), tattr.ctx_size_out);
for (i = 0; i < 5; i++)

View File

@ -44,25 +44,25 @@ void test_varlen(void)
CHECK_VAL(bss->payload1_len2, size2);
CHECK_VAL(bss->total1, size1 + size2);
CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload2_len1, size1);
CHECK_VAL(data->payload2_len2, size2);
CHECK_VAL(data->total2, size1 + size2);
CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload3_len1, size1);
CHECK_VAL(data->payload3_len2, size2);
CHECK_VAL(data->total3, size1 + size2);
CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload4_len1, size1);
CHECK_VAL(data->payload4_len2, size2);
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
"doesn't match!");
"doesn't match!\n");
cleanup:
test_varlen__destroy(skel);
}

View File

@ -1,5 +1,10 @@
#include <stdint.h>
#include <stdbool.h>
void preserce_ptr_sz_fn(long x) {}
#define __bpf_aligned __attribute__((aligned(8)))
/*
* KERNEL
*/
@ -444,51 +449,51 @@ struct core_reloc_primitives {
char a;
int b;
enum core_reloc_primitives_enum c;
void *d;
int (*f)(const char *);
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___diff_enum_def {
char a;
int b;
void *d;
int (*f)(const char *);
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
enum {
X = 100,
Y = 200,
} c; /* inline enum def with differing set of values */
} c __bpf_aligned; /* inline enum def with differing set of values */
};
struct core_reloc_primitives___diff_func_proto {
void (*f)(int); /* incompatible function prototype */
void *d;
enum core_reloc_primitives_enum c;
void (*f)(int) __bpf_aligned; /* incompatible function prototype */
void *d __bpf_aligned;
enum core_reloc_primitives_enum c __bpf_aligned;
int b;
char a;
};
struct core_reloc_primitives___diff_ptr_type {
const char * const d; /* different pointee type + modifiers */
char a;
const char * const d __bpf_aligned; /* different pointee type + modifiers */
char a __bpf_aligned;
int b;
enum core_reloc_primitives_enum c;
int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_enum {
char a[1];
int b;
int c; /* int instead of enum */
void *d;
int (*f)(const char *);
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_int {
char a[1];
int *b; /* ptr instead of int */
enum core_reloc_primitives_enum c;
void *d;
int (*f)(const char *);
int *b __bpf_aligned; /* ptr instead of int */
enum core_reloc_primitives_enum c __bpf_aligned;
void *d __bpf_aligned;
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_ptr {
@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr {
int b;
enum core_reloc_primitives_enum c;
int d; /* int instead of ptr */
int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
/*
@ -507,7 +512,7 @@ struct core_reloc_mods_output {
};
typedef const int int_t;
typedef const char *char_ptr_t;
typedef const char *char_ptr_t __bpf_aligned;
typedef const int arr_t[7];
struct core_reloc_mods_substruct {
@ -523,9 +528,9 @@ typedef struct {
struct core_reloc_mods {
int a;
int_t b;
char *c;
char *c __bpf_aligned;
char_ptr_t d;
int e[3];
int e[3] __bpf_aligned;
arr_t f;
struct core_reloc_mods_substruct g;
core_reloc_mods_substruct_t h;
@ -535,9 +540,9 @@ struct core_reloc_mods {
struct core_reloc_mods___mod_swap {
int b;
int_t a;
char *d;
char *d __bpf_aligned;
char_ptr_t c;
int f[3];
int f[3] __bpf_aligned;
arr_t e;
struct {
int y;
@ -555,7 +560,7 @@ typedef arr1_t arr2_t;
typedef arr2_t arr3_t;
typedef arr3_t arr4_t;
typedef const char * const volatile fancy_char_ptr_t;
typedef const char * const volatile fancy_char_ptr_t __bpf_aligned;
typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt;
@ -567,7 +572,7 @@ struct core_reloc_mods___typedefs {
arr4_t e;
fancy_char_ptr_t d;
fancy_char_ptr_t c;
int3_t b;
int3_t b __bpf_aligned;
int3_t a;
};
@ -739,19 +744,19 @@ struct core_reloc_bitfields___bit_sz_change {
int8_t sb4: 1; /* 4 -> 1 */
int32_t sb20: 30; /* 20 -> 30 */
/* non-bitfields */
uint16_t u32; /* 32 -> 16 */
int64_t s32; /* 32 -> 64 */
uint16_t u32; /* 32 -> 16 */
int64_t s32 __bpf_aligned; /* 32 -> 64 */
};
/* turn bitfield into non-bitfield and vice versa */
struct core_reloc_bitfields___bitfield_vs_int {
uint64_t ub1; /* 3 -> 64 non-bitfield */
uint8_t ub2; /* 20 -> 8 non-bitfield */
int64_t ub7; /* 7 -> 64 non-bitfield signed */
int64_t sb4; /* 4 -> 64 non-bitfield signed */
uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
int64_t ub7 __bpf_aligned; /* 7 -> 64 non-bitfield signed */
int64_t sb4 __bpf_aligned; /* 4 -> 64 non-bitfield signed */
uint64_t sb20 __bpf_aligned; /* 20 -> 16 non-bitfield unsigned */
int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
uint64_t s32: 60 __bpf_aligned; /* 32 non-bitfield -> 60 bitfield */
};
struct core_reloc_bitfields___just_big_enough {

View File

@ -54,6 +54,7 @@ SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr;
int good_call_rv = 0;
int bad_call_rv = 0;
@ -62,6 +63,46 @@ int bpf_testcb(struct bpf_sock_ops *skops)
int v = 0;
int op;
/* Test reading fields in bpf_sock_ops using single register */
asm volatile (
"%[reuse] = *(u32 *)(%[reuse] +96)"
: [reuse] "+r"(reuse)
:);
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
: [op] "+r"(op)
: [skops] "r"(skops)
:);
asm volatile (
"r9 = %[skops];\n"
"r8 = *(u32 *)(r9 +164);\n"
"*(u32 *)(r9 +164) = r8;\n"
:: [skops] "r"(skops)
: "r9", "r8");
asm volatile (
"r1 = %[skops];\n"
"r1 = *(u64 *)(r1 +184);\n"
"if r1 == 0 goto +1;\n"
"r1 = *(u32 *)(r1 +4);\n"
:: [skops] "r"(skops):"r1");
asm volatile (
"r9 = %[skops];\n"
"r9 = *(u64 *)(r9 +184);\n"
"if r9 == 0 goto +1;\n"
"r9 = *(u32 *)(r9 +4);\n"
:: [skops] "r"(skops):"r9");
asm volatile (
"r1 = %[skops];\n"
"r2 = *(u64 *)(r1 +184);\n"
"if r2 == 0 goto +1;\n"
"r2 = *(u32 *)(r2 +4);\n"
:: [skops] "r"(skops):"r1", "r2");
op = (int) skops->op;
update_event_map(op);

View File

@ -15,9 +15,9 @@ int test_pid = 0;
bool capture = false;
/* .bss */
long payload1_len1 = 0;
long payload1_len2 = 0;
long total1 = 0;
__u64 payload1_len1 = 0;
__u64 payload1_len2 = 0;
__u64 total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
/* .data */

View File

@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num)
info_garbage.garbage = 0;
err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
"err:%d errno:%d info_len:%u sizeof(*info):%lu",
"err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
err = -1;
goto done;
@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num)
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
"err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
"err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
err, errno, info.id, info_len, sizeof(info),
raw_btf_size, info.btf_size, expected_nbytes, ret)) {
err = -1;
@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
nexpected_line = snprintf(expected_line, line_size,
"%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
"{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"%u,0x%x,[[%d,%d],[%d,%d]]}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
v->unused_bits2a,
v->bits28,
v->unused_bits2b,
v->ui64,
(__u64)v->ui64,
v->ui8a[0], v->ui8a[1],
v->ui8a[2], v->ui8a[3],
v->ui8a[4], v->ui8a[5],

View File

@ -135,6 +135,11 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
static inline void *u64_to_ptr(__u64 ptr)
{
return (void *) (unsigned long) ptr;
}
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);