Merge branch 'libbpf: auto-resize relocatable LOAD/STORE instructions'
Andrii Nakryiko says: ==================== Patch set implements logic in libbpf to auto-adjust memory size (1-, 2-, 4-, 8-bytes) of load/store (LD/ST/STX) instructions which have BPF CO-RE field offset relocation associated with it. In practice this means transparent handling of 32-bit kernels, both pointer and unsigned integers. Signed integers are not relocatable with zero-extending loads/stores, so libbpf poisons them and generates a warning. If/when BPF gets support for sign-extending loads/stores, it would be possible to automatically relocate them as well. All the details are contained in patch #2 comments and commit message. Patch #3 is a simple change in libbpf to make advanced testing with custom BTF easier. Patch #4 validates correct uses of auto-resizable loads, as well as checks that libbpf fails invalid uses. Patch #1 skips CO-RE relocation for programs that had bpf_program__set_autoload(prog, false) set on them, reducing warnings and noise. v2->v3: - fix copyright (Alexei); v1->v2: - more consistent names for instruction mem size conversion routines (Alexei); - extended selftests to use relocatable STX instructions (Alexei); - added a fix for skipping CO-RE relocation for non-loadable programs. Cc: Luka Perkov <luka.perkov@sartura.hr> Cc: Tony Ambardar <tony.ambardar@gmail.com> ==================== Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
commit
1e9259eca8
|
@ -5040,16 +5040,19 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
|
|||
static int bpf_core_calc_field_relo(const struct bpf_program *prog,
|
||||
const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val, bool *validate)
|
||||
__u32 *val, __u32 *field_sz, __u32 *type_id,
|
||||
bool *validate)
|
||||
{
|
||||
const struct bpf_core_accessor *acc;
|
||||
const struct btf_type *t;
|
||||
__u32 byte_off, byte_sz, bit_off, bit_sz;
|
||||
__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
|
||||
const struct btf_member *m;
|
||||
const struct btf_type *mt;
|
||||
bool bitfield;
|
||||
__s64 sz;
|
||||
|
||||
*field_sz = 0;
|
||||
|
||||
if (relo->kind == BPF_FIELD_EXISTS) {
|
||||
*val = spec ? 1 : 0;
|
||||
return 0;
|
||||
|
@ -5065,6 +5068,12 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
|
|||
if (!acc->name) {
|
||||
if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
|
||||
*val = spec->bit_offset / 8;
|
||||
/* remember field size for load/store mem size */
|
||||
sz = btf__resolve_size(spec->btf, acc->type_id);
|
||||
if (sz < 0)
|
||||
return -EINVAL;
|
||||
*field_sz = sz;
|
||||
*type_id = acc->type_id;
|
||||
} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
|
||||
sz = btf__resolve_size(spec->btf, acc->type_id);
|
||||
if (sz < 0)
|
||||
|
@ -5081,7 +5090,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
|
|||
}
|
||||
|
||||
m = btf_members(t) + acc->idx;
|
||||
mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
|
||||
mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
|
||||
bit_off = spec->bit_offset;
|
||||
bit_sz = btf_member_bitfield_size(t, acc->idx);
|
||||
|
||||
|
@ -5101,7 +5110,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
|
|||
byte_off = bit_off / 8 / byte_sz * byte_sz;
|
||||
}
|
||||
} else {
|
||||
sz = btf__resolve_size(spec->btf, m->type);
|
||||
sz = btf__resolve_size(spec->btf, field_type_id);
|
||||
if (sz < 0)
|
||||
return -EINVAL;
|
||||
byte_sz = sz;
|
||||
|
@ -5119,6 +5128,10 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
|
|||
switch (relo->kind) {
|
||||
case BPF_FIELD_BYTE_OFFSET:
|
||||
*val = byte_off;
|
||||
if (!bitfield) {
|
||||
*field_sz = byte_sz;
|
||||
*type_id = field_type_id;
|
||||
}
|
||||
break;
|
||||
case BPF_FIELD_BYTE_SIZE:
|
||||
*val = byte_sz;
|
||||
|
@ -5219,6 +5232,19 @@ struct bpf_core_relo_res
|
|||
bool poison;
|
||||
/* some relocations can't be validated against orig_val */
|
||||
bool validate;
|
||||
/* for field byte offset relocations or the forms:
|
||||
* *(T *)(rX + <off>) = rY
|
||||
* rX = *(T *)(rY + <off>),
|
||||
* we remember original and resolved field size to adjust direct
|
||||
* memory loads of pointers and integers; this is necessary for 32-bit
|
||||
* host kernel architectures, but also allows to automatically
|
||||
* relocate fields that were resized from, e.g., u32 to u64, etc.
|
||||
*/
|
||||
bool fail_memsz_adjust;
|
||||
__u32 orig_sz;
|
||||
__u32 orig_type_id;
|
||||
__u32 new_sz;
|
||||
__u32 new_type_id;
|
||||
};
|
||||
|
||||
/* Calculate original and target relocation values, given local and target
|
||||
|
@ -5240,10 +5266,56 @@ static int bpf_core_calc_relo(const struct bpf_program *prog,
|
|||
res->new_val = 0;
|
||||
res->poison = false;
|
||||
res->validate = true;
|
||||
res->fail_memsz_adjust = false;
|
||||
res->orig_sz = res->new_sz = 0;
|
||||
res->orig_type_id = res->new_type_id = 0;
|
||||
|
||||
if (core_relo_is_field_based(relo->kind)) {
|
||||
err = bpf_core_calc_field_relo(prog, relo, local_spec, &res->orig_val, &res->validate);
|
||||
err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec, &res->new_val, NULL);
|
||||
err = bpf_core_calc_field_relo(prog, relo, local_spec,
|
||||
&res->orig_val, &res->orig_sz,
|
||||
&res->orig_type_id, &res->validate);
|
||||
err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
|
||||
&res->new_val, &res->new_sz,
|
||||
&res->new_type_id, NULL);
|
||||
if (err)
|
||||
goto done;
|
||||
/* Validate if it's safe to adjust load/store memory size.
|
||||
* Adjustments are performed only if original and new memory
|
||||
* sizes differ.
|
||||
*/
|
||||
res->fail_memsz_adjust = false;
|
||||
if (res->orig_sz != res->new_sz) {
|
||||
const struct btf_type *orig_t, *new_t;
|
||||
|
||||
orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
|
||||
new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
|
||||
|
||||
/* There are two use cases in which it's safe to
|
||||
* adjust load/store's mem size:
|
||||
* - reading a 32-bit kernel pointer, while on BPF
|
||||
* size pointers are always 64-bit; in this case
|
||||
* it's safe to "downsize" instruction size due to
|
||||
* pointer being treated as unsigned integer with
|
||||
* zero-extended upper 32-bits;
|
||||
* - reading unsigned integers, again due to
|
||||
* zero-extension is preserving the value correctly.
|
||||
*
|
||||
* In all other cases it's incorrect to attempt to
|
||||
* load/store field because read value will be
|
||||
* incorrect, so we poison relocated instruction.
|
||||
*/
|
||||
if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
|
||||
goto done;
|
||||
if (btf_is_int(orig_t) && btf_is_int(new_t) &&
|
||||
btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
|
||||
btf_int_encoding(new_t) != BTF_INT_SIGNED)
|
||||
goto done;
|
||||
|
||||
/* mark as invalid mem size adjustment, but this will
|
||||
* only be checked for LDX/STX/ST insns
|
||||
*/
|
||||
res->fail_memsz_adjust = true;
|
||||
}
|
||||
} else if (core_relo_is_type_based(relo->kind)) {
|
||||
err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
|
||||
err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
|
||||
|
@ -5252,6 +5324,7 @@ static int bpf_core_calc_relo(const struct bpf_program *prog,
|
|||
err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
|
||||
}
|
||||
|
||||
done:
|
||||
if (err == -EUCLEAN) {
|
||||
/* EUCLEAN is used to signal instruction poisoning request */
|
||||
res->poison = true;
|
||||
|
@ -5291,6 +5364,28 @@ static bool is_ldimm64(struct bpf_insn *insn)
|
|||
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
|
||||
}
|
||||
|
||||
/* Decode the BPF_SIZE field of a load/store instruction into a byte count.
 * Returns 1/2/4/8 for BPF_B/BPF_H/BPF_W/BPF_DW, or -1 for any other encoding.
 */
static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_B:
		return 1;
	case BPF_H:
		return 2;
	case BPF_W:
		return 4;
	case BPF_DW:
		return 8;
	default:
		return -1;
	}
}
|
||||
|
||||
/* Encode a byte count (1/2/4/8) as the corresponding BPF_SIZE value
 * (BPF_B/BPF_H/BPF_W/BPF_DW). Returns -1 for any unsupported size.
 */
static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 1:
		return BPF_B;
	case 2:
		return BPF_H;
	case 4:
		return BPF_W;
	case 8:
		return BPF_DW;
	default:
		return -1;
	}
}
|
||||
|
||||
/*
|
||||
* Patch relocatable BPF instruction.
|
||||
*
|
||||
|
@ -5300,10 +5395,13 @@ static bool is_ldimm64(struct bpf_insn *insn)
|
|||
* spec, and is checked before patching instruction. If actual insn->imm value
|
||||
* is wrong, bail out with error.
|
||||
*
|
||||
* Currently three kinds of BPF instructions are supported:
|
||||
* Currently supported classes of BPF instruction are:
|
||||
* 1. rX = <imm> (assignment with immediate operand);
|
||||
* 2. rX += <imm> (arithmetic operations with immediate operand);
|
||||
* 3. rX = <imm64> (load with 64-bit immediate value).
|
||||
* 3. rX = <imm64> (load with 64-bit immediate value);
|
||||
* 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
|
||||
* 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
|
||||
* 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
|
||||
*/
|
||||
static int bpf_core_patch_insn(struct bpf_program *prog,
|
||||
const struct bpf_core_relo *relo,
|
||||
|
@ -5327,6 +5425,7 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
|
|||
class = BPF_CLASS(insn->code);
|
||||
|
||||
if (res->poison) {
|
||||
poison:
|
||||
/* poison second part of ldimm64 to avoid confusing error from
|
||||
* verifier about "unknown opcode 00"
|
||||
*/
|
||||
|
@ -5369,10 +5468,39 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
|
|||
prog->name, relo_idx, insn_idx, new_val);
|
||||
return -ERANGE;
|
||||
}
|
||||
if (res->fail_memsz_adjust) {
|
||||
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
|
||||
"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
|
||||
prog->name, relo_idx, insn_idx);
|
||||
goto poison;
|
||||
}
|
||||
|
||||
orig_val = insn->off;
|
||||
insn->off = new_val;
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
|
||||
prog->name, relo_idx, insn_idx, orig_val, new_val);
|
||||
|
||||
if (res->new_sz != res->orig_sz) {
|
||||
int insn_bytes_sz, insn_bpf_sz;
|
||||
|
||||
insn_bytes_sz = insn_bpf_size_to_bytes(insn);
|
||||
if (insn_bytes_sz != res->orig_sz) {
|
||||
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
|
||||
prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
|
||||
if (insn_bpf_sz < 0) {
|
||||
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
|
||||
prog->name, relo_idx, insn_idx, res->new_sz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
|
||||
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
|
||||
prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
|
||||
}
|
||||
break;
|
||||
case BPF_LD: {
|
||||
__u64 imm;
|
||||
|
@ -5714,7 +5842,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
|
|||
return 0;
|
||||
|
||||
if (targ_btf_path)
|
||||
targ_btf = btf__parse_elf(targ_btf_path, NULL);
|
||||
targ_btf = btf__parse(targ_btf_path, NULL);
|
||||
else
|
||||
targ_btf = obj->btf_vmlinux;
|
||||
if (IS_ERR_OR_NULL(targ_btf)) {
|
||||
|
@ -5765,6 +5893,11 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
|
|||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* no need to apply CO-RE relocation if the program is
|
||||
* not going to be loaded
|
||||
*/
|
||||
if (!prog->load)
|
||||
continue;
|
||||
|
||||
err = bpf_core_apply_relo(prog, rec, i, obj->btf,
|
||||
targ_btf, cand_cache);
|
||||
|
|
|
@ -0,0 +1,225 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2020 Facebook */
|
||||
|
||||
#include <test_progs.h>
|
||||
#include <bpf/btf.h>
|
||||
|
||||
/* Real layout and sizes according to the test's (32-bit) BTF;
 * needs to be defined before the skeleton is included.
 */
|
||||
/* User-space mirror of the struct the BPF side reads/writes; field order
 * and sizes match the custom 32-bit "kernel" BTF built in the test below.
 */
struct test_struct___real {
	unsigned int ptr; /* can't use `void *`, it is always 8 byte in BPF target */
	unsigned int val2;
	unsigned long long val1;
	unsigned short val3;
	unsigned char val4;
	unsigned char _pad;
};
|
||||
|
||||
#include "test_core_autosize.skel.h"
|
||||
|
||||
/* required by the CHECK() macro from test_progs.h */
static int duration = 0;
|
||||
|
||||
static struct {
|
||||
unsigned long long ptr_samesized;
|
||||
unsigned long long val1_samesized;
|
||||
unsigned long long val2_samesized;
|
||||
unsigned long long val3_samesized;
|
||||
unsigned long long val4_samesized;
|
||||
struct test_struct___real output_samesized;
|
||||
|
||||
unsigned long long ptr_downsized;
|
||||
unsigned long long val1_downsized;
|
||||
unsigned long long val2_downsized;
|
||||
unsigned long long val3_downsized;
|
||||
unsigned long long val4_downsized;
|
||||
struct test_struct___real output_downsized;
|
||||
|
||||
unsigned long long ptr_probed;
|
||||
unsigned long long val1_probed;
|
||||
unsigned long long val2_probed;
|
||||
unsigned long long val3_probed;
|
||||
unsigned long long val4_probed;
|
||||
|
||||
unsigned long long ptr_signed;
|
||||
unsigned long long val1_signed;
|
||||
unsigned long long val2_signed;
|
||||
unsigned long long val3_signed;
|
||||
unsigned long long val4_signed;
|
||||
struct test_struct___real output_signed;
|
||||
} out;
|
||||
|
||||
void test_core_autosize(void)
|
||||
{
|
||||
char btf_file[] = "/tmp/core_autosize.btf.XXXXXX";
|
||||
int err, fd = -1, zero = 0;
|
||||
int char_id, short_id, int_id, long_long_id, void_ptr_id, id;
|
||||
struct test_core_autosize* skel = NULL;
|
||||
struct bpf_object_load_attr load_attr = {};
|
||||
struct bpf_program *prog;
|
||||
struct bpf_map *bss_map;
|
||||
struct btf *btf = NULL;
|
||||
size_t written;
|
||||
const void *raw_data;
|
||||
__u32 raw_sz;
|
||||
FILE *f = NULL;
|
||||
|
||||
btf = btf__new_empty();
|
||||
if (!ASSERT_OK_PTR(btf, "empty_btf"))
|
||||
return;
|
||||
/* Emit the following struct with 32-bit pointer size:
|
||||
*
|
||||
* struct test_struct {
|
||||
* void *ptr;
|
||||
* unsigned long val2;
|
||||
* unsigned long long val1;
|
||||
* unsigned short val3;
|
||||
* unsigned char val4;
|
||||
* char: 8;
|
||||
* };
|
||||
*
|
||||
* This struct is going to be used as the "kernel BTF" for this test.
|
||||
* It's equivalent memory-layout-wise to test_struct__real above.
|
||||
*/
|
||||
|
||||
/* force 32-bit pointer size */
|
||||
btf__set_pointer_size(btf, 4);
|
||||
|
||||
char_id = btf__add_int(btf, "unsigned char", 1, 0);
|
||||
ASSERT_EQ(char_id, 1, "char_id");
|
||||
short_id = btf__add_int(btf, "unsigned short", 2, 0);
|
||||
ASSERT_EQ(short_id, 2, "short_id");
|
||||
/* "long unsigned int" of 4 byte size tells BTF that sizeof(void *) == 4 */
|
||||
int_id = btf__add_int(btf, "long unsigned int", 4, 0);
|
||||
ASSERT_EQ(int_id, 3, "int_id");
|
||||
long_long_id = btf__add_int(btf, "unsigned long long", 8, 0);
|
||||
ASSERT_EQ(long_long_id, 4, "long_long_id");
|
||||
void_ptr_id = btf__add_ptr(btf, 0);
|
||||
ASSERT_EQ(void_ptr_id, 5, "void_ptr_id");
|
||||
|
||||
id = btf__add_struct(btf, "test_struct", 20 /* bytes */);
|
||||
ASSERT_EQ(id, 6, "struct_id");
|
||||
err = btf__add_field(btf, "ptr", void_ptr_id, 0, 0);
|
||||
err = err ?: btf__add_field(btf, "val2", int_id, 32, 0);
|
||||
err = err ?: btf__add_field(btf, "val1", long_long_id, 64, 0);
|
||||
err = err ?: btf__add_field(btf, "val3", short_id, 128, 0);
|
||||
err = err ?: btf__add_field(btf, "val4", char_id, 144, 0);
|
||||
ASSERT_OK(err, "struct_fields");
|
||||
|
||||
fd = mkstemp(btf_file);
|
||||
if (CHECK(fd < 0, "btf_tmp", "failed to create file: %d\n", fd))
|
||||
goto cleanup;
|
||||
f = fdopen(fd, "w");
|
||||
if (!ASSERT_OK_PTR(f, "btf_fdopen"))
|
||||
goto cleanup;
|
||||
|
||||
raw_data = btf__get_raw_data(btf, &raw_sz);
|
||||
if (!ASSERT_OK_PTR(raw_data, "raw_data"))
|
||||
goto cleanup;
|
||||
written = fwrite(raw_data, 1, raw_sz, f);
|
||||
if (CHECK(written != raw_sz, "btf_write", "written: %zu, errno: %d\n", written, errno))
|
||||
goto cleanup;
|
||||
fflush(f);
|
||||
fclose(f);
|
||||
f = NULL;
|
||||
close(fd);
|
||||
fd = -1;
|
||||
|
||||
/* open and load BPF program with custom BTF as the kernel BTF */
|
||||
skel = test_core_autosize__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
/* disable handle_signed() for now */
|
||||
prog = bpf_object__find_program_by_name(skel->obj, "handle_signed");
|
||||
if (!ASSERT_OK_PTR(prog, "prog_find"))
|
||||
goto cleanup;
|
||||
bpf_program__set_autoload(prog, false);
|
||||
|
||||
load_attr.obj = skel->obj;
|
||||
load_attr.target_btf_path = btf_file;
|
||||
err = bpf_object__load_xattr(&load_attr);
|
||||
if (!ASSERT_OK(err, "prog_load"))
|
||||
goto cleanup;
|
||||
|
||||
prog = bpf_object__find_program_by_name(skel->obj, "handle_samesize");
|
||||
if (!ASSERT_OK_PTR(prog, "prog_find"))
|
||||
goto cleanup;
|
||||
skel->links.handle_samesize = bpf_program__attach(prog);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_samesize, "prog_attach"))
|
||||
goto cleanup;
|
||||
|
||||
prog = bpf_object__find_program_by_name(skel->obj, "handle_downsize");
|
||||
if (!ASSERT_OK_PTR(prog, "prog_find"))
|
||||
goto cleanup;
|
||||
skel->links.handle_downsize = bpf_program__attach(prog);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_downsize, "prog_attach"))
|
||||
goto cleanup;
|
||||
|
||||
prog = bpf_object__find_program_by_name(skel->obj, "handle_probed");
|
||||
if (!ASSERT_OK_PTR(prog, "prog_find"))
|
||||
goto cleanup;
|
||||
skel->links.handle_probed = bpf_program__attach(prog);
|
||||
if (!ASSERT_OK_PTR(skel->links.handle_probed, "prog_attach"))
|
||||
goto cleanup;
|
||||
|
||||
usleep(1);
|
||||
|
||||
bss_map = bpf_object__find_map_by_name(skel->obj, "test_cor.bss");
|
||||
if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
|
||||
goto cleanup;
|
||||
|
||||
err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
|
||||
if (!ASSERT_OK(err, "bss_lookup"))
|
||||
goto cleanup;
|
||||
|
||||
ASSERT_EQ(out.ptr_samesized, 0x01020304, "ptr_samesized");
|
||||
ASSERT_EQ(out.val1_samesized, 0x1020304050607080, "val1_samesized");
|
||||
ASSERT_EQ(out.val2_samesized, 0x0a0b0c0d, "val2_samesized");
|
||||
ASSERT_EQ(out.val3_samesized, 0xfeed, "val3_samesized");
|
||||
ASSERT_EQ(out.val4_samesized, 0xb9, "val4_samesized");
|
||||
ASSERT_EQ(out.output_samesized.ptr, 0x01020304, "ptr_samesized");
|
||||
ASSERT_EQ(out.output_samesized.val1, 0x1020304050607080, "val1_samesized");
|
||||
ASSERT_EQ(out.output_samesized.val2, 0x0a0b0c0d, "val2_samesized");
|
||||
ASSERT_EQ(out.output_samesized.val3, 0xfeed, "val3_samesized");
|
||||
ASSERT_EQ(out.output_samesized.val4, 0xb9, "val4_samesized");
|
||||
|
||||
ASSERT_EQ(out.ptr_downsized, 0x01020304, "ptr_downsized");
|
||||
ASSERT_EQ(out.val1_downsized, 0x1020304050607080, "val1_downsized");
|
||||
ASSERT_EQ(out.val2_downsized, 0x0a0b0c0d, "val2_downsized");
|
||||
ASSERT_EQ(out.val3_downsized, 0xfeed, "val3_downsized");
|
||||
ASSERT_EQ(out.val4_downsized, 0xb9, "val4_downsized");
|
||||
ASSERT_EQ(out.output_downsized.ptr, 0x01020304, "ptr_downsized");
|
||||
ASSERT_EQ(out.output_downsized.val1, 0x1020304050607080, "val1_downsized");
|
||||
ASSERT_EQ(out.output_downsized.val2, 0x0a0b0c0d, "val2_downsized");
|
||||
ASSERT_EQ(out.output_downsized.val3, 0xfeed, "val3_downsized");
|
||||
ASSERT_EQ(out.output_downsized.val4, 0xb9, "val4_downsized");
|
||||
|
||||
ASSERT_EQ(out.ptr_probed, 0x01020304, "ptr_probed");
|
||||
ASSERT_EQ(out.val1_probed, 0x1020304050607080, "val1_probed");
|
||||
ASSERT_EQ(out.val2_probed, 0x0a0b0c0d, "val2_probed");
|
||||
ASSERT_EQ(out.val3_probed, 0xfeed, "val3_probed");
|
||||
ASSERT_EQ(out.val4_probed, 0xb9, "val4_probed");
|
||||
|
||||
test_core_autosize__destroy(skel);
|
||||
skel = NULL;
|
||||
|
||||
/* now re-load with handle_signed() enabled, it should fail loading */
|
||||
skel = test_core_autosize__open();
|
||||
if (!ASSERT_OK_PTR(skel, "skel_open"))
|
||||
return;
|
||||
|
||||
load_attr.obj = skel->obj;
|
||||
load_attr.target_btf_path = btf_file;
|
||||
err = bpf_object__load_xattr(&load_attr);
|
||||
if (!ASSERT_ERR(err, "bad_prog_load"))
|
||||
goto cleanup;
|
||||
|
||||
cleanup:
|
||||
if (f)
|
||||
fclose(f);
|
||||
if (fd >= 0)
|
||||
close(fd);
|
||||
remove(btf_file);
|
||||
btf__free(btf);
|
||||
test_core_autosize__destroy(skel);
|
||||
}
|
|
@ -0,0 +1,172 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2020 Facebook */
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <stdint.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
|
||||
/* license declaration required for BPF programs using GPL-only helpers */
char _license[] SEC("license") = "GPL";
|
||||
|
||||
/* fields of exactly the same size */
|
||||
/* Every field has exactly the size the custom "kernel" BTF declares, so no
 * instruction resizing is needed.
 * Fix: use canonical __attribute__ spelling, consistent with
 * test_struct___downsize below (the original mixed __attribute/__attribute__).
 */
struct test_struct___samesize {
	void *ptr;
	unsigned long long val1;
	unsigned int val2;
	unsigned short val3;
	unsigned char val4;
} __attribute__((preserve_access_index));
|
||||
|
||||
/* unsigned fields that have to be downsized by libbpf */
|
||||
/* All-`unsigned long` view (8 bytes each in BPF target); libbpf must downsize
 * the generated loads/stores to match the smaller fields in the target BTF.
 */
struct test_struct___downsize {
	void *ptr;
	unsigned long val1;
	unsigned long val2;
	unsigned long val3;
	unsigned long val4;
	/* total sz: 40 */
} __attribute__((preserve_access_index));
|
||||
|
||||
/* fields with signed integers of wrong size, should be rejected */
|
||||
/* Signed fields of mismatched size: zero-extending loads/stores can't
 * preserve sign, so libbpf is expected to reject (poison) these accesses.
 * Fix: use canonical __attribute__ spelling, consistent with
 * test_struct___downsize above (the original mixed __attribute/__attribute__).
 */
struct test_struct___signed {
	void *ptr;
	long val1;
	long val2;
	long val3;
	long val4;
} __attribute__((preserve_access_index));
|
||||
|
||||
/* real layout and sizes according to test's (32-bit) BTF */
|
||||
/* Actual layout and sizes according to the test's (32-bit) BTF */
struct test_struct___real {
	unsigned int ptr; /* can't use `void *`, it is always 8 byte in BPF target */
	unsigned int val2;
	unsigned long long val1;
	unsigned short val3;
	unsigned char val4;
	unsigned char _pad;
	/* total sz: 20 */
};
|
||||
|
||||
struct test_struct___real input = {
|
||||
.ptr = 0x01020304,
|
||||
.val1 = 0x1020304050607080,
|
||||
.val2 = 0x0a0b0c0d,
|
||||
.val3 = 0xfeed,
|
||||
.val4 = 0xb9,
|
||||
._pad = 0xff, /* make sure no accidental zeros are present */
|
||||
};
|
||||
|
||||
unsigned long long ptr_samesized = 0;
|
||||
unsigned long long val1_samesized = 0;
|
||||
unsigned long long val2_samesized = 0;
|
||||
unsigned long long val3_samesized = 0;
|
||||
unsigned long long val4_samesized = 0;
|
||||
struct test_struct___real output_samesized = {};
|
||||
|
||||
unsigned long long ptr_downsized = 0;
|
||||
unsigned long long val1_downsized = 0;
|
||||
unsigned long long val2_downsized = 0;
|
||||
unsigned long long val3_downsized = 0;
|
||||
unsigned long long val4_downsized = 0;
|
||||
struct test_struct___real output_downsized = {};
|
||||
|
||||
unsigned long long ptr_probed = 0;
|
||||
unsigned long long val1_probed = 0;
|
||||
unsigned long long val2_probed = 0;
|
||||
unsigned long long val3_probed = 0;
|
||||
unsigned long long val4_probed = 0;
|
||||
|
||||
unsigned long long ptr_signed = 0;
|
||||
unsigned long long val1_signed = 0;
|
||||
unsigned long long val2_signed = 0;
|
||||
unsigned long long val3_signed = 0;
|
||||
unsigned long long val4_signed = 0;
|
||||
struct test_struct___real output_signed = {};
|
||||
|
||||
SEC("raw_tp/sys_exit")
int handle_samesize(void *ctx)
{
	/* Access `input` through the same-sized CO-RE view; relocations need
	 * no instruction resizing here.
	 */
	struct test_struct___samesize *src = (void *)&input;
	struct test_struct___samesize *dst = (void *)&output_samesized;

	ptr_samesized = (unsigned long long)src->ptr;
	val1_samesized = src->val1;
	val2_samesized = src->val2;
	val3_samesized = src->val3;
	val4_samesized = src->val4;

	dst->ptr = src->ptr;
	dst->val1 = src->val1;
	dst->val2 = src->val2;
	dst->val3 = src->val3;
	dst->val4 = src->val4;

	return 0;
}
|
||||
|
||||
SEC("raw_tp/sys_exit")
int handle_downsize(void *ctx)
{
	/* Access `input` through the all-`unsigned long` view; libbpf must
	 * downsize both the loads (from src) and the stores (into dst).
	 */
	struct test_struct___downsize *src = (void *)&input;
	struct test_struct___downsize *dst = (void *)&output_downsized;

	ptr_downsized = (unsigned long long)src->ptr;
	val1_downsized = src->val1;
	val2_downsized = src->val2;
	val3_downsized = src->val3;
	val4_downsized = src->val4;

	dst->ptr = src->ptr;
	dst->val1 = src->val1;
	dst->val2 = src->val2;
	dst->val3 = src->val3;
	dst->val4 = src->val4;

	return 0;
}
|
||||
|
||||
SEC("raw_tp/sys_enter")
int handle_probed(void *ctx)
{
	/* Read each field via bpf_core_read() with bpf_core_field_size(), so
	 * the copied size comes from the relocated (target) field size rather
	 * than from a fixed-size load instruction.
	 */
	struct test_struct___downsize *src = (void *)&input;
	__u64 val;

	val = 0;
	bpf_core_read(&val, bpf_core_field_size(src->ptr), &src->ptr);
	ptr_probed = val;

	val = 0;
	bpf_core_read(&val, bpf_core_field_size(src->val1), &src->val1);
	val1_probed = val;

	val = 0;
	bpf_core_read(&val, bpf_core_field_size(src->val2), &src->val2);
	val2_probed = val;

	val = 0;
	bpf_core_read(&val, bpf_core_field_size(src->val3), &src->val3);
	val3_probed = val;

	val = 0;
	bpf_core_read(&val, bpf_core_field_size(src->val4), &src->val4);
	val4_probed = val;

	return 0;
}
|
||||
|
||||
SEC("raw_tp/sys_enter")
int handle_signed(void *ctx)
{
	/* Signed mismatched-size accesses; the user-space test loads this
	 * program only to verify that the object fails to load.
	 */
	struct test_struct___signed *src = (void *)&input;
	struct test_struct___signed *dst = (void *)&output_signed;

	val2_signed = src->val2;
	val3_signed = src->val3;
	val4_signed = src->val4;

	dst->val2 = src->val2;
	dst->val3 = src->val3;
	dst->val4 = src->val4;

	return 0;
}
|
Loading…
Reference in New Issue