Andrii Nakryiko says:

====================
bpf 2022-11-11

We've added 11 non-merge commits during the last 8 day(s) which contain
a total of 11 files changed, 83 insertions(+), 74 deletions(-).

The main changes are:

1) Fix strncpy_from_kernel_nofault() to prevent out-of-bounds writes,
   from Alban Crequy.

2) Fix for bpf_prog_test_run_skb() to prevent wrong alignment,
   from Baisong Zhong.

3) Switch BPF_DISPATCHER to static_call() instead of ftrace infra, with
   a small build fix on top, from Peter Zijlstra and Nathan Chancellor.

4) Fix memory leak in BPF verifier in some error cases, from Wang Yufen.

5) 32-bit compilation error fixes for BPF selftests, from Pu Lehui and
   Yang Jihong.

6) Ensure even distribution of per-CPU free list elements, from Xu Kuohai.

7) Fix copy_map_value() to track special zeroed out areas properly,
   from Xu Kuohai.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Fix offset calculation error in __copy_map_value and zero_map_value
  bpf: Initialize same number of free nodes for each pcpu_freelist
  selftests: bpf: Add a test when bpf_probe_read_kernel_str() returns EFAULT
  maccess: Fix writing offset in case of fault in strncpy_from_kernel_nofault()
  selftests/bpf: Fix test_progs compilation failure in 32-bit arch
  selftests/bpf: Fix casting error when cross-compiling test_verifier for 32-bit platforms
  bpf: Fix memory leaks in __check_func_call
  bpf: Add explicit cast to 'void *' for __BPF_DISPATCHER_UPDATE()
  bpf: Convert BPF_DISPATCHER to use static_call() (not ftrace)
  bpf: Revert ("Fix dispatcher patchable function entry to 5 bytes nop")
  bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb()
====================

Link: https://lore.kernel.org/r/20221111231624.938829-1-andrii@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit c1754bf019

Changed paths:
  arch/x86/net
  include/linux
  kernel/bpf
  mm
  net/bpf
  tools/testing/selftests/bpf
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -11,7 +11,6 @@
 #include <linux/bpf.h>
 #include <linux/memory.h>
 #include <linux/sort.h>
-#include <linux/init.h>
 #include <asm/extable.h>
 #include <asm/set_memory.h>
 #include <asm/nospec-branch.h>
@@ -389,18 +388,6 @@ out:
 	return ret;
 }
 
-int __init bpf_arch_init_dispatcher_early(void *ip)
-{
-	const u8 *nop_insn = x86_nops[5];
-
-	if (is_endbr(*(u32 *)ip))
-		ip += ENDBR_INSN_SIZE;
-
-	if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
-		text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
-	return 0;
-}
-
 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 		       void *old_addr, void *new_addr)
 {
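The deleted helper pre-planted the 5-byte NOP that the old ftrace-based entry patching relied on. A rough userspace model of its check-then-patch logic, under stated assumptions (the buffer contents and stand-in functions are illustrative; only the endbr64 encoding and the classic 5-byte x86-64 NOP are real instruction facts):

#include <stdio.h>
#include <string.h>

#define ENDBR_INSN_SIZE 4
#define X86_PATCH_SIZE  5

static const unsigned char endbr64[] = { 0xf3, 0x0f, 0x1e, 0xfa };
static const unsigned char nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

int main(void)
{
	/* Fake function entry: endbr64 followed by five 1-byte NOPs. */
	unsigned char entry[16] = { 0xf3, 0x0f, 0x1e, 0xfa,
				    0x90, 0x90, 0x90, 0x90, 0x90 };
	unsigned char *ip = entry;

	if (!memcmp(ip, endbr64, ENDBR_INSN_SIZE))	/* is_endbr() stand-in */
		ip += ENDBR_INSN_SIZE;

	if (memcmp(ip, nop5, X86_PATCH_SIZE))		/* not yet a 5-byte NOP? */
		memcpy(ip, nop5, X86_PATCH_SIZE);	/* text_poke_early() stand-in */

	printf("entry+4 now starts with %#04x\n", ip[0]);	/* prints 0x0f */
	return 0;
}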
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -27,7 +27,7 @@
 #include <linux/bpfptr.h>
 #include <linux/btf.h>
 #include <linux/rcupdate_trace.h>
-#include <linux/init.h>
+#include <linux/static_call.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
@@ -315,7 +315,7 @@ static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, b
 		u32 next_off = map->off_arr->field_off[i];
 
 		memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
-		curr_off += map->off_arr->field_sz[i];
+		curr_off = next_off + map->off_arr->field_sz[i];
 	}
 	memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
 }
@@ -344,7 +344,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
 		u32 next_off = map->off_arr->field_off[i];
 
 		memset(dst + curr_off, 0, next_off - curr_off);
-		curr_off += map->off_arr->field_sz[i];
+		curr_off = next_off + map->off_arr->field_sz[i];
 	}
 	memset(dst + curr_off, 0, map->value_size - curr_off);
 }
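The one-line fix in both walkers is easiest to see with concrete numbers: curr_off += field_sz[i] advances only by the field width and forgets the gap that the preceding memcpy()/memset() just covered, so every later range is shifted down onto the special fields. A minimal standalone sketch, assuming a hypothetical layout of two 8-byte special fields at offsets 8 and 24 inside a 32-byte value:

#include <stdio.h>

/* Hypothetical layout: 32-byte value, special fields (which copies must
 * skip) at offsets 8 and 24, each 8 bytes wide. */
static const unsigned int field_off[] = { 8, 24 };
static const unsigned int field_sz[]  = { 8, 8 };
#define CNT 2
#define VALUE_SIZE 32

static void walk(int fixed)
{
	unsigned int curr_off = 0;

	for (unsigned int i = 0; i < CNT; i++) {
		unsigned int next_off = field_off[i];

		printf("  copy [%u, %u)\n", curr_off, next_off);
		if (fixed)
			curr_off = next_off + field_sz[i];	/* jump past the field */
		else
			curr_off += field_sz[i];		/* buggy: loses the gap */
	}
	printf("  copy [%u, %u)\n", curr_off, VALUE_SIZE);
}

int main(void)
{
	puts("buggy ranges (overlap the special fields):");
	walk(0);	/* [0,8) [8,24) [16,32) */
	puts("fixed ranges (skip them):");
	walk(1);	/* [0,8) [16,24) [32,32) */
	return 0;
}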
@@ -954,6 +954,10 @@ struct bpf_dispatcher {
 	void *rw_image;
 	u32 image_off;
 	struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+	struct static_call_key *sc_key;
+	void *sc_tramp;
+#endif
 };
 
 static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
@@ -971,7 +975,33 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
 					  struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
-int __init bpf_arch_init_dispatcher_early(void *ip);
+
+/*
+ * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double-indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name)				\
+	.sc_key = &STATIC_CALL_KEY(_name),			\
+	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name)				\
+	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name)				\
+	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
+	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name)	bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
+
 #define BPF_DISPATCHER_INIT(_name) {				\
 	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
@@ -984,34 +1014,21 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
 		.name = #_name,					\
 		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
 	},							\
+	__BPF_DISPATCHER_SC_INIT(_name##_call)			\
 }
 
-#define BPF_DISPATCHER_INIT_CALL(_name)					\
-	static int __init _name##_init(void)				\
-	{								\
-		return bpf_arch_init_dispatcher_early(_name##_func);	\
-	}								\
-	early_initcall(_name##_init)
-
-#ifdef CONFIG_X86_64
-#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
-#else
-#define BPF_DISPATCHER_ATTRIBUTES
-#endif
-
 #define DEFINE_BPF_DISPATCHER(name)				\
-	notrace BPF_DISPATCHER_ATTRIBUTES			\
+	__BPF_DISPATCHER_SC(name);				\
 	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
 		const void *ctx,				\
 		const struct bpf_insn *insnsi,			\
 		bpf_func_t bpf_func)				\
 	{							\
-		return bpf_func(ctx, insnsi);			\
+		return __BPF_DISPATCHER_CALL(name);		\
 	}							\
 	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);		\
 	struct bpf_dispatcher bpf_dispatcher_##name =		\
-		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);	\
-	BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
+		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
 
 #define DECLARE_BPF_DISPATCHER(name)				\
 	unsigned int bpf_dispatcher_##name##_func(		\
@@ -1019,6 +1036,7 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
 		const struct bpf_insn *insnsi,			\
 		bpf_func_t bpf_func);				\
 	extern struct bpf_dispatcher bpf_dispatcher_##name;
+
 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
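static_call() itself has no userspace equivalent, but the shape of the conversion can be modeled with a function pointer standing in for the patched call site: __BPF_DISPATCHER_UPDATE() retargets the call, and without CONFIG_HAVE_STATIC_CALL the call macro collapses to the old indirect bpf_func(ctx, insnsi). All names in this sketch are illustrative, not kernel API:

#include <stdio.h>

typedef unsigned int (*bpf_func_t)(const void *ctx, const void *insnsi);

static unsigned int interp(const void *ctx, const void *insnsi)
{
	(void)insnsi;
	printf("indirect call, ctx=%p\n", ctx);
	return 1;
}

/* Stand-in for the default static_call target (bpf_dispatcher_nop_func). */
static unsigned int nop_func(const void *ctx, const void *insnsi,
			     bpf_func_t bpf_func)
{
	/* No dispatcher image installed: fall back to the indirect call. */
	return bpf_func(ctx, insnsi);
}

static unsigned int jitted_image(const void *ctx, const void *insnsi,
				 bpf_func_t bpf_func)
{
	(void)insnsi; (void)bpf_func;
	printf("direct dispatch, ctx=%p\n", ctx);
	return 0;
}

/* Models the call site that static_call() would patch in place. */
static unsigned int (*dispatcher_call)(const void *, const void *,
				       bpf_func_t) = nop_func;

int main(void)
{
	int ctx = 42;

	dispatcher_call(&ctx, NULL, interp);	/* before: nop -> indirect    */
	dispatcher_call = jitted_image;		/* __BPF_DISPATCHER_UPDATE()  */
	dispatcher_call(&ctx, NULL, interp);	/* after: "direct" dispatch   */
	return 0;
}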
diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c
--- a/kernel/bpf/dispatcher.c
+++ b/kernel/bpf/dispatcher.c
@@ -4,7 +4,7 @@
 #include <linux/hash.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
-#include <linux/init.h>
+#include <linux/static_call.h>
 
 /* The BPF dispatcher is a multiway branch code generator. The
  * dispatcher is a mechanism to avoid the performance penalty of an
@@ -91,11 +91,6 @@ int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int n
 	return -ENOTSUPP;
 }
 
-int __weak __init bpf_arch_init_dispatcher_early(void *ip)
-{
-	return -ENOTSUPP;
-}
-
 static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
 {
 	s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
@@ -110,17 +105,11 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *b
 
 static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
 {
-	void *old, *new, *tmp;
-	u32 noff;
-	int err;
+	void *new, *tmp;
+	u32 noff = 0;
 
-	if (!prev_num_progs) {
-		old = NULL;
-		noff = 0;
-	} else {
-		old = d->image + d->image_off;
+	if (prev_num_progs)
 		noff = d->image_off ^ (PAGE_SIZE / 2);
-	}
 
 	new = d->num_progs ? d->image + noff : NULL;
 	tmp = d->num_progs ? d->rw_image + noff : NULL;
@@ -134,11 +123,10 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
 		return;
 	}
 
-	err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
-	if (err || !new)
-		return;
+	__BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);
 
-	d->image_off = noff;
+	if (new)
+		d->image_off = noff;
 }
 
 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
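The noff = d->image_off ^ (PAGE_SIZE / 2) line is the double-buffering step: the next dispatcher image is always built in the half of the page that is not currently live, then committed only if a new image exists. A tiny demo of the flip, assuming a 4096-byte page:

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	/* Two image slots share one page; XOR toggles 0 <-> PAGE_SIZE/2. */
	unsigned int image_off = 0;

	for (int update = 1; update <= 4; update++) {
		unsigned int noff = image_off ^ (PAGE_SIZE / 2);

		printf("update %d: build at %u, retire %u\n",
		       update, noff, image_off);
		image_off = noff;	/* committed only when a new image exists */
	}
	return 0;
}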
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -100,22 +100,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 			    u32 nr_elems)
 {
 	struct pcpu_freelist_head *head;
-	int i, cpu, pcpu_entries;
+	unsigned int cpu, cpu_idx, i, j, n, m;
 
-	pcpu_entries = nr_elems / num_possible_cpus() + 1;
-	i = 0;
+	n = nr_elems / num_possible_cpus();
+	m = nr_elems % num_possible_cpus();
 
+	cpu_idx = 0;
 	for_each_possible_cpu(cpu) {
-again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		/* No locking required as this is not visible yet. */
-		pcpu_freelist_push_node(head, buf);
-		i++;
-		buf += elem_size;
-		if (i == nr_elems)
-			break;
-		if (i % pcpu_entries)
-			goto again;
+		j = n + (cpu_idx < m ? 1 : 0);
+		for (i = 0; i < j; i++) {
+			/* No locking required as this is not visible yet. */
+			pcpu_freelist_push_node(head, buf);
+			buf += elem_size;
+		}
+		cpu_idx++;
 	}
 }
 
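The rewritten populate loop hands out elements so that per-CPU counts differ by at most one: every CPU gets nr_elems / num_possible_cpus(), and the first nr_elems % num_possible_cpus() CPUs get one extra. A standalone check of the arithmetic for 10 elements over 4 CPUs:

#include <stdio.h>

int main(void)
{
	unsigned int nr_elems = 10, ncpus = 4;
	unsigned int n = nr_elems / ncpus;	/* 2 */
	unsigned int m = nr_elems % ncpus;	/* 2 */

	for (unsigned int cpu_idx = 0; cpu_idx < ncpus; cpu_idx++)
		printf("cpu %u gets %u elements\n",
		       cpu_idx, n + (cpu_idx < m ? 1 : 0));
	/* Prints 3 3 2 2; the old "nr_elems / ncpus + 1 per CPU until
	 * exhausted" scheme yielded 3 3 3 1. */
	return 0;
}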
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6745,11 +6745,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	/* Transfer references to the callee */
 	err = copy_reference_state(callee, caller);
 	if (err)
-		return err;
+		goto err_out;
 
 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
 	if (err)
-		return err;
+		goto err_out;
 
 	clear_caller_saved_regs(env, caller->regs);
 
@@ -6766,6 +6766,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		print_verifier_state(env, callee, true);
 	}
 	return 0;
+
+err_out:
+	free_func_state(callee);
+	state->frame[state->curframe + 1] = NULL;
+	return err;
 }
 
 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
@@ -6979,8 +6984,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 		return -EINVAL;
 	}
 
-	state->curframe--;
-	caller = state->frame[state->curframe];
+	caller = state->frame[state->curframe - 1];
 	if (callee->in_callback_fn) {
 		/* enforce R0 return value range [0, 1]. */
 		struct tnum range = callee->callback_ret_range;
@@ -7019,7 +7023,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 	}
 	/* clear everything in the callee */
 	free_func_state(callee);
-	state->frame[state->curframe + 1] = NULL;
+	state->frame[state->curframe--] = NULL;
 	return 0;
 }
 
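The verifier fix replaces bare "return err;" statements with a shared cleanup label, because by that point the callee state has already been allocated and parked in the frame array. A minimal userspace model of the pattern, with all types and helpers as stand-ins:

#include <stdlib.h>

/* Stand-ins for verifier state; only the cleanup shape matters. */
struct func_state { int regs[11]; };
struct verifier_state { struct func_state *frame[8]; int curframe; };

static int some_check(void) { return -22; /* pretend -EINVAL */ }

static int check_func_call(struct verifier_state *state)
{
	struct func_state *callee = calloc(1, sizeof(*callee));
	int err;

	if (!callee)
		return -12;	/* -ENOMEM */
	state->frame[state->curframe + 1] = callee;

	err = some_check();
	if (err)
		goto err_out;	/* was "return err;" -- leaked callee */

	return 0;

err_out:
	free(callee);
	state->frame[state->curframe + 1] = NULL;
	return err;
}

int main(void)
{
	struct verifier_state st = { .curframe = 0 };

	return check_func_call(&st) == -22 ? 0 : 1;
}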
diff --git a/mm/maccess.c b/mm/maccess.c
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -97,7 +97,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
 	return src - unsafe_addr;
 Efault:
 	pagefault_enable();
-	dst[-1] = '\0';
+	dst[0] = '\0';
 	return -EFAULT;
 }
 
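In strncpy_from_kernel_nofault(), dst is advanced only after a byte has been copied, so when the very first read faults, dst still points at the start of the caller's buffer and the old dst[-1] = '\0' wrote one byte before it. A userspace sketch of the difference (the adjacent variable merely illustrates "memory before the buffer"; real stack layout is not guaranteed):

#include <stdio.h>

int main(void)
{
	char canary = 'C';	/* pretend this sits just before the buffer */
	char buf[4] = "XXX";
	char *dst = buf;	/* fault happens before any byte is copied */

	/* old: dst[-1] = '\0';  -- out-of-bounds write before buf       */
	dst[0] = '\0';		/* fixed: terminate at the failed slot   */

	printf("canary=%c buf[0]=%d\n", canary, buf[0]);
	return 0;
}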
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -774,6 +774,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
 	if (user_size > size)
 		return ERR_PTR(-EMSGSIZE);
 
+	size = SKB_DATA_ALIGN(size);
 	data = kzalloc(size + headroom + tailroom, GFP_USER);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
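SKB_DATA_ALIGN() rounds the requested size up to the cacheline boundary, matching how real skb data buffers are sized. A quick model of the rounding, assuming a 64-byte SMP_CACHE_BYTES:

#include <stdio.h>

/* Kernel definition is ALIGN(X, SMP_CACHE_BYTES); assume 64 here. */
#define SMP_CACHE_BYTES 64
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned int)(a) - 1))

int main(void)
{
	for (unsigned int size = 60; size <= 68; size += 4)
		printf("size %3u -> aligned %3u\n",
		       size, ALIGN_UP(size, SMP_CACHE_BYTES));
	/* 60 -> 64, 64 -> 64, 68 -> 128 */
	return 0;
}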
diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c
--- a/tools/testing/selftests/bpf/prog_tests/varlen.c
+++ b/tools/testing/selftests/bpf/prog_tests/varlen.c
@@ -63,6 +63,13 @@ void test_varlen(void)
 	CHECK_VAL(data->total4, size1 + size2);
 	CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
 	      "doesn't match!\n");
+
+	CHECK_VAL(bss->ret_bad_read, -EFAULT);
+	CHECK_VAL(data->payload_bad[0], 0x42);
+	CHECK_VAL(data->payload_bad[1], 0x42);
+	CHECK_VAL(data->payload_bad[2], 0);
+	CHECK_VAL(data->payload_bad[3], 0x42);
+	CHECK_VAL(data->payload_bad[4], 0x42);
 cleanup:
 	test_varlen__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c
--- a/tools/testing/selftests/bpf/progs/test_varlen.c
+++ b/tools/testing/selftests/bpf/progs/test_varlen.c
@@ -19,6 +19,7 @@ __u64 payload1_len1 = 0;
 __u64 payload1_len2 = 0;
 __u64 total1 = 0;
 char payload1[MAX_LEN + MAX_LEN] = {};
+__u64 ret_bad_read = 0;
 
 /* .data */
 int payload2_len1 = -1;
@@ -36,6 +37,8 @@ int payload4_len2 = -1;
 int total4 = -1;
 char payload4[MAX_LEN + MAX_LEN] = { 1 };
 
+char payload_bad[5] = { 0x42, 0x42, 0x42, 0x42, 0x42 };
+
 SEC("raw_tp/sys_enter")
 int handler64_unsigned(void *regs)
 {
@@ -61,6 +64,8 @@ int handler64_unsigned(void *regs)
 
 	total1 = payload - (void *)payload1;
 
+	ret_bad_read = bpf_probe_read_kernel_str(payload_bad + 2, 1, (void *) -1);
+
 	return 0;
 }
 
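Taken together, the two selftest halves pin down the fixed semantics: a failed 1-byte bpf_probe_read_kernel_str() into payload_bad + 2 from an unmapped address must return -EFAULT and may touch only index 2, leaving the 0x42 canaries on both sides intact (the old fault path zeroed index 1 instead). A plain C model of the asserted buffer state:

#include <stdio.h>

int main(void)
{
	unsigned char payload_bad[5] = { 0x42, 0x42, 0x42, 0x42, 0x42 };

	/* What the fixed fault path writes for a read into &payload_bad[2]:
	 * only the failed slot is NUL-terminated. */
	payload_bad[2] = 0;

	for (int i = 0; i < 5; i++)
		printf("payload_bad[%d] = %#x\n", i, payload_bad[i]);
	return 0;
}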
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1010,7 +1010,7 @@ static inline const char *str_msg(const struct msg *msg, char *buf)
 			msg->subtest_done.have_log);
 		break;
 	case MSG_TEST_LOG:
-		sprintf(buf, "MSG_TEST_LOG (cnt: %ld, last: %d)",
+		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
 			strlen(msg->test_log.log_buf),
 			msg->test_log.is_last);
 		break;
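The format fix matters because strlen() returns size_t, which is "unsigned int" on 32-bit targets; "%ld" (expecting long) then trips -Werror=format and breaks the build. Minimal illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *log_buf = "some log line";

	/* "%zu" matches size_t on every architecture; "%ld" only happens
	 * to work where size_t and long coincide. */
	printf("MSG_TEST_LOG (cnt: %zu)\n", strlen(log_buf));
	return 0;
}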
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1260,7 +1260,7 @@ static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
 
 	bzero(&info, sizeof(info));
 	info.xlated_prog_len = xlated_prog_len;
-	info.xlated_prog_insns = (__u64)*buf;
+	info.xlated_prog_insns = (__u64)(unsigned long)*buf;
 	if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
 		perror("second bpf_obj_get_info_by_fd failed");
 		goto out_free_buf;
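The double cast exists because casting a pointer straight to __u64 on a 32-bit target provokes "cast from pointer to integer of different size"; going through unsigned long, which is pointer-sized everywhere Linux runs, is the portable idiom. A small standalone illustration (the __u64 typedef is a stand-in for the kernel header's):

#include <stdio.h>

typedef unsigned long long __u64;	/* stand-in for <linux/types.h> */

int main(void)
{
	int insns[4] = { 0 };
	int *buf = insns;

	/* (__u64)buf alone warns on 32-bit; the two-step cast widens a
	 * pointer-sized integer instead of the pointer itself. */
	__u64 addr = (__u64)(unsigned long)buf;

	printf("addr = 0x%llx\n", addr);
	return 0;
}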