Merge branch 'bpf-verifier-improvements'

Alexei Starovoitov says:

====================
bpf: verifier improvements

A number of bpf verifier improvements from Gianluca.
See individual patches for details.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit c22e5c125b
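In practical terms, the series lets helpers that take a pointer-plus-size argument pair accept map-value pointers (not just stack pointers) and sizes that are merely bounded rather than constant. Below is a minimal sketch of a program that becomes loadable with these changes; the map name, section names, helper stubs, and the 48-byte value size (matching struct test_val in the selftests) are illustrative assumptions in the samples/bpf style of the time, not part of this diff:

/* Sketch only: exercises ARG_PTR_TO_UNINIT_MEM on a map value plus a
 * bounded, non-constant size. All names here are hypothetical.
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* helper stubs in the samples/bpf style of this era */
static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
	(void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_probe_read)(void *dst, int size, const void *src) =
	(void *) BPF_FUNC_probe_read;

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

struct bpf_map_def SEC("maps") scratch = {
	.type        = BPF_MAP_TYPE_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = 48,
	.max_entries = 1,
};

SEC("tracepoint/syscalls/sys_enter_read")
int vlen_copy(void *ctx)
{
	int key = 0;
	char *val = bpf_map_lookup_elem(&scratch, &key);
	unsigned long len;

	if (!val)
		return 0;

	len = val[0] & 0x1f;	/* verifier can prove 0 <= len <= 31 */
	if (len > 0)		/* and on this path 1 <= len <= 31   */
		/* dst is a map value, size is a bounded UNKNOWN_VALUE:
		 * both were rejected before this series.
		 */
		bpf_probe_read(val + 8, len, ctx);
	return 0;
}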
@@ -69,14 +69,14 @@ enum bpf_arg_type {
 	/* the following constraints used to prototype bpf_memcmp() and other
 	 * functions that access data on eBPF program stack
 	 */
-	ARG_PTR_TO_STACK,	/* any pointer to eBPF program stack */
-	ARG_PTR_TO_RAW_STACK,	/* any pointer to eBPF program stack, area does not
-				 * need to be initialized, helper function must fill
-				 * all bytes or clear them in error case.
+	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
+	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
+				 * helper function must fill all bytes or clear
+				 * them in error case.
 				 */

-	ARG_CONST_STACK_SIZE,	/* number of bytes accessed from stack */
-	ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
+	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
+	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

 	ARG_PTR_TO_CTX,		/* pointer to context */
 	ARG_ANYTHING,		/* any (initialized) argument is ok */
@@ -176,6 +176,6 @@ const struct bpf_func_proto bpf_get_current_comm_proto = {
 	.func		= bpf_get_current_comm,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_RAW_STACK,
-	.arg2_type	= ARG_CONST_STACK_SIZE,
+	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
 };
@@ -481,6 +481,13 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
 	regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
 }

+static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs,
+					     u32 regno)
+{
+	mark_reg_unknown_value(regs, regno);
+	reset_reg_range_values(regs, regno);
+}
+
 enum reg_arg_type {
 	SRC_OP, /* register is used as source operand */
 	DST_OP, /* register is used as destination operand */
@@ -532,6 +539,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	switch (type) {
 	case PTR_TO_MAP_VALUE:
 	case PTR_TO_MAP_VALUE_OR_NULL:
+	case PTR_TO_MAP_VALUE_ADJ:
 	case PTR_TO_STACK:
 	case PTR_TO_CTX:
 	case PTR_TO_PACKET:
@@ -616,7 +624,8 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
 		}
 		if (value_regno >= 0)
 			/* have read misc data from the stack */
-			mark_reg_unknown_value(state->regs, value_regno);
+			mark_reg_unknown_value_and_range(state->regs,
+							 value_regno);
 		return 0;
 	}
 }
@@ -627,7 +636,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
 {
 	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

-	if (off < 0 || off + size > map->value_size) {
+	if (off < 0 || size <= 0 || off + size > map->value_size) {
 		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
 			map->value_size, off, size);
 		return -EACCES;
@@ -635,6 +644,51 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
 	return 0;
 }

+/* check read/write into an adjusted map element */
+static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno,
+				int off, int size)
+{
+	struct bpf_verifier_state *state = &env->cur_state;
+	struct bpf_reg_state *reg = &state->regs[regno];
+	int err;
+
+	/* We adjusted the register to this map value, so we
+	 * need to change off and size to min_value and max_value
+	 * respectively to make sure our theoretical access will be
+	 * safe.
+	 */
+	if (log_level)
+		print_verifier_state(state);
+	env->varlen_map_value_access = true;
+	/* The minimum value is only important with signed
+	 * comparisons where we can't assume the floor of a
+	 * value is 0. If we are using signed variables for our
+	 * index'es we need to make sure that whatever we use
+	 * will have a set floor within our range.
+	 */
+	if (reg->min_value < 0) {
+		verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
+			regno);
+		return -EACCES;
+	}
+	err = check_map_access(env, regno, reg->min_value + off, size);
+	if (err) {
+		verbose("R%d min value is outside of the array range\n",
+			regno);
+		return err;
+	}
+
+	/* If we haven't set a max value then we need to bail
+	 * since we can't be sure we won't do bad things.
+	 */
+	if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
+		verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
+			regno);
+		return -EACCES;
+	}
+	return check_map_access(env, regno, reg->max_value + off, size);
+}
+
 #define MAX_PACKET_OFF 0xffff

 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
@@ -775,47 +829,13 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 			return -EACCES;
 		}

-		/* If we adjusted the register to this map value at all then we
-		 * need to change off and size to min_value and max_value
-		 * respectively to make sure our theoretical access will be
-		 * safe.
-		 */
-		if (reg->type == PTR_TO_MAP_VALUE_ADJ) {
-			if (log_level)
-				print_verifier_state(state);
-			env->varlen_map_value_access = true;
-			/* The minimum value is only important with signed
-			 * comparisons where we can't assume the floor of a
-			 * value is 0. If we are using signed variables for our
-			 * index'es we need to make sure that whatever we use
-			 * will have a set floor within our range.
-			 */
-			if (reg->min_value < 0) {
-				verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
-					regno);
-				return -EACCES;
-			}
-			err = check_map_access(env, regno, reg->min_value + off,
-					       size);
-			if (err) {
-				verbose("R%d min value is outside of the array range\n",
-					regno);
-				return err;
-			}
-
-			/* If we haven't set a max value then we need to bail
-			 * since we can't be sure we won't do bad things.
-			 */
-			if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
-				verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
-					regno);
-				return -EACCES;
-			}
-			off += reg->max_value;
-		}
-		err = check_map_access(env, regno, off, size);
+		if (reg->type == PTR_TO_MAP_VALUE_ADJ)
+			err = check_map_access_adj(env, regno, off, size);
+		else
+			err = check_map_access(env, regno, off, size);
 		if (!err && t == BPF_READ && value_regno >= 0)
-			mark_reg_unknown_value(state->regs, value_regno);
+			mark_reg_unknown_value_and_range(state->regs,
+							 value_regno);

 	} else if (reg->type == PTR_TO_CTX) {
 		enum bpf_reg_type reg_type = UNKNOWN_VALUE;
@@ -827,7 +847,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 		}
 		err = check_ctx_access(env, off, size, t, &reg_type);
 		if (!err && t == BPF_READ && value_regno >= 0) {
-			mark_reg_unknown_value(state->regs, value_regno);
+			mark_reg_unknown_value_and_range(state->regs,
+							 value_regno);
 			/* note that reg.[id|off|range] == 0 */
 			state->regs[value_regno].type = reg_type;
 		}
@@ -860,7 +881,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 		}
 		err = check_packet_access(env, regno, off, size);
 		if (!err && t == BPF_READ && value_regno >= 0)
-			mark_reg_unknown_value(state->regs, value_regno);
+			mark_reg_unknown_value_and_range(state->regs,
+							 value_regno);
 	} else {
 		verbose("R%d invalid mem access '%s'\n",
 			regno, reg_type_str[reg->type]);
@@ -958,6 +980,25 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 	return 0;
 }

+static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+				   int access_size, bool zero_size_allowed,
+				   struct bpf_call_arg_meta *meta)
+{
+	struct bpf_reg_state *regs = env->cur_state.regs;
+
+	switch (regs[regno].type) {
+	case PTR_TO_PACKET:
+		return check_packet_access(env, regno, 0, access_size);
+	case PTR_TO_MAP_VALUE:
+		return check_map_access(env, regno, 0, access_size);
+	case PTR_TO_MAP_VALUE_ADJ:
+		return check_map_access_adj(env, regno, 0, access_size);
+	default: /* const_imm|ptr_to_stack or invalid ptr */
+		return check_stack_boundary(env, regno, access_size,
+					    zero_size_allowed, meta);
+	}
+}
+
 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 			  enum bpf_arg_type arg_type,
 			  struct bpf_call_arg_meta *meta)
@@ -993,10 +1034,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		expected_type = PTR_TO_STACK;
 		if (type != PTR_TO_PACKET && type != expected_type)
 			goto err_type;
-	} else if (arg_type == ARG_CONST_STACK_SIZE ||
-		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
+	} else if (arg_type == ARG_CONST_SIZE ||
+		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
 		expected_type = CONST_IMM;
-		if (type != expected_type)
+		/* One exception. Allow UNKNOWN_VALUE registers when the
+		 * boundaries are known and don't cause unsafe memory accesses
+		 */
+		if (type != UNKNOWN_VALUE && type != expected_type)
 			goto err_type;
 	} else if (arg_type == ARG_CONST_MAP_PTR) {
 		expected_type = CONST_PTR_TO_MAP;
@@ -1006,8 +1050,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		expected_type = PTR_TO_CTX;
 		if (type != expected_type)
 			goto err_type;
-	} else if (arg_type == ARG_PTR_TO_STACK ||
-		   arg_type == ARG_PTR_TO_RAW_STACK) {
+	} else if (arg_type == ARG_PTR_TO_MEM ||
+		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
 		expected_type = PTR_TO_STACK;
 		/* One exception here. In case function allows for NULL to be
 		 * passed in as argument, it's a CONST_IMM type. Final test
@@ -1015,9 +1059,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		 */
 		if (type == CONST_IMM && reg->imm == 0)
 			/* final test in check_stack_boundary() */;
-		else if (type != PTR_TO_PACKET && type != expected_type)
+		else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE &&
+			 type != PTR_TO_MAP_VALUE_ADJ && type != expected_type)
 			goto err_type;
-		meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
+		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
 	} else {
 		verbose("unsupported arg_type %d\n", arg_type);
 		return -EFAULT;
@@ -1063,9 +1108,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		err = check_stack_boundary(env, regno,
 					   meta->map_ptr->value_size,
 					   false, NULL);
-	} else if (arg_type == ARG_CONST_STACK_SIZE ||
-		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
-		bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);
+	} else if (arg_type == ARG_CONST_SIZE ||
+		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
+		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);

 		/* bpf_xxx(..., buf, len) call will access 'len' bytes
 		 * from stack pointer 'buf'. Check it
@@ -1073,14 +1118,50 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		 */
 		if (regno == 0) {
 			/* kernel subsystem misconfigured verifier */
-			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
+			verbose("ARG_CONST_SIZE cannot be first argument\n");
 			return -EACCES;
 		}
-		if (regs[regno - 1].type == PTR_TO_PACKET)
-			err = check_packet_access(env, regno - 1, 0, reg->imm);
-		else
-			err = check_stack_boundary(env, regno - 1, reg->imm,
-						   zero_size_allowed, meta);
+
+		/* If the register is UNKNOWN_VALUE, the access check happens
+		 * using its boundaries. Otherwise, just use its imm
+		 */
+		if (type == UNKNOWN_VALUE) {
+			/* For unprivileged variable accesses, disable raw
+			 * mode so that the program is required to
+			 * initialize all the memory that the helper could
+			 * just partially fill up.
+			 */
+			meta = NULL;
+
+			if (reg->min_value < 0) {
+				verbose("R%d min value is negative, either use unsigned or 'var &= const'\n",
+					regno);
+				return -EACCES;
+			}
+
+			if (reg->min_value == 0) {
+				err = check_helper_mem_access(env, regno - 1, 0,
+							      zero_size_allowed,
+							      meta);
+				if (err)
+					return err;
+			}
+
+			if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
+				verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
+					regno);
+				return -EACCES;
+			}
+			err = check_helper_mem_access(env, regno - 1,
+						      reg->max_value,
+						      zero_size_allowed, meta);
+			if (err)
+				return err;
+		} else {
+			/* register is CONST_IMM */
+			err = check_helper_mem_access(env, regno - 1, reg->imm,
+						      zero_size_allowed, meta);
+		}
 	}

 	return err;
@@ -1154,15 +1235,15 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
 {
 	int count = 0;

-	if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
+	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
 		count++;
-	if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
+	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
 		count++;
-	if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
+	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
 		count++;
-	if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
+	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
 		count++;
-	if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
+	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
 		count++;

 	return count > 1 ? -EINVAL : 0;
@@ -2729,7 +2810,6 @@ static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;

-			reset_reg_range_values(regs, insn->dst_reg);
 			if (BPF_SIZE(insn->code) != BPF_W &&
 			    BPF_SIZE(insn->code) != BPF_DW) {
 				insn_idx++;
@@ -76,8 +76,8 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
 	.func		= bpf_probe_read,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_RAW_STACK,
-	.arg2_type	= ARG_CONST_STACK_SIZE,
+	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
 	.arg3_type	= ARG_ANYTHING,
 };
@@ -109,8 +109,8 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_ANYTHING,
-	.arg2_type	= ARG_PTR_TO_STACK,
-	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
 };

 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
@@ -213,8 +213,8 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
 	.func		= bpf_trace_printk,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_STACK,
-	.arg2_type	= ARG_CONST_STACK_SIZE,
+	.arg1_type	= ARG_PTR_TO_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
 };

 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
@@ -329,8 +329,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_STACK,
-	.arg5_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
 };

 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
@@ -492,8 +492,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_STACK,
-	.arg5_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
 };

 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
@@ -1416,8 +1416,8 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_ANYTHING,
-	.arg3_type	= ARG_PTR_TO_STACK,
-	.arg4_type	= ARG_CONST_STACK_SIZE,
+	.arg3_type	= ARG_PTR_TO_MEM,
+	.arg4_type	= ARG_CONST_SIZE,
 	.arg5_type	= ARG_ANYTHING,
 };
@@ -1447,8 +1447,8 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_ANYTHING,
-	.arg3_type	= ARG_PTR_TO_RAW_STACK,
-	.arg4_type	= ARG_CONST_STACK_SIZE,
+	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg4_type	= ARG_CONST_SIZE,
 };

 BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
@@ -1601,10 +1601,10 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 	.gpl_only	= false,
 	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_STACK,
-	.arg2_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
-	.arg3_type	= ARG_PTR_TO_STACK,
-	.arg4_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
+	.arg1_type	= ARG_PTR_TO_MEM,
+	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg3_type	= ARG_PTR_TO_MEM,
+	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
 	.arg5_type	= ARG_ANYTHING,
 };
@@ -2306,8 +2306,8 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_STACK,
-	.arg5_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
 };

 static unsigned short bpf_tunnel_key_af(u64 flags)
@@ -2377,8 +2377,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_RAW_STACK,
-	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
 	.arg4_type	= ARG_ANYTHING,
 };
@@ -2412,8 +2412,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_RAW_STACK,
-	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
 };

 static struct metadata_dst __percpu *md_dst;
@@ -2483,8 +2483,8 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_STACK,
-	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
 	.arg4_type	= ARG_ANYTHING,
 };
@@ -2509,8 +2509,8 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_STACK,
-	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
 };

 static const struct bpf_func_proto *
@@ -2593,8 +2593,8 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_STACK,
-	.arg5_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
 };

 static const struct bpf_func_proto *
@@ -2905,6 +2905,953 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.errstr = "invalid bpf_context access",
 	},
+	{
+		"helper access to map: full range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to map: partial range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_2, 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to map: empty range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to map: out-of-bound range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to map: negative range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_2, -8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=0 size=-8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const imm): full range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_2,
+				      sizeof(struct test_val) -
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const imm): partial range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_2, 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const imm): empty range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "R1 min value is outside of the array range",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const imm): out-of-bound range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_2,
+				      sizeof(struct test_val) -
+				      offsetof(struct test_val, foo) + 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const imm): negative range (> adjustment)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_2, -8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const imm): negative range (< adjustment)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "R1 min value is outside of the array range",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const reg): full range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3,
+				      offsetof(struct test_val, foo)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2,
+				      sizeof(struct test_val) -
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const reg): partial range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3,
+				      offsetof(struct test_val, foo)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2, 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const reg): empty range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "R1 min value is outside of the array range",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const reg): out-of-bound range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3,
+				      offsetof(struct test_val, foo)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2,
+				      sizeof(struct test_val) -
+				      offsetof(struct test_val, foo) + 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const reg): negative range (> adjustment)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3,
+				      offsetof(struct test_val, foo)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2, -8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via const reg): negative range (< adjustment)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_3,
+				      offsetof(struct test_val, foo)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "R1 min value is outside of the array range",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via variable): full range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+				    offsetof(struct test_val, foo), 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2,
+				      sizeof(struct test_val) -
+				      offsetof(struct test_val, foo)),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via variable): partial range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+				    offsetof(struct test_val, foo), 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2, 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via variable): empty range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+				    offsetof(struct test_val, foo), 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "R1 min value is outside of the array range",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via variable): no max check",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to adjusted map (via variable): wrong max check",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+				    offsetof(struct test_val, foo), 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+			BPF_MOV64_IMM(BPF_REG_2,
+				      sizeof(struct test_val) -
+				      offsetof(struct test_val, foo) + 1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"map element value is preserved across register spilling",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr_unpriv = "R0 leaks addr",
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"map element value (adjusted) is preserved across register spilling",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
+				      offsetof(struct test_val, foo)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, bitwise AND, zero included",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid stack type R1 off=-64 access_size=0",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid stack type R1 off=-64 access_size=65",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, JMP, correct bounds",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, JMP (signed), correct bounds",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, JMP, bounds + offset",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid stack type R1 off=-64 access_size=65",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, JMP, wrong max",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid stack type R1 off=-64 access_size=65",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, JMP, no max check",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 unbounded memory access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, JMP, no min check",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid stack type R1 off=-64 access_size=0",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: stack, JMP (signed), no min check",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 min value is negative",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: map, JMP, correct bounds",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+				    sizeof(struct test_val), 4),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: map, JMP, wrong max",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+				    sizeof(struct test_val) + 1, 4),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: map adjusted, JMP, correct bounds",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+				    sizeof(struct test_val) - 20, 4),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: map adjusted, JMP, wrong max",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+				    sizeof(struct test_val) - 19, 4),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 3 },
+		.errstr = "R1 min value is outside of the array range",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: size > 0 not allowed on NULL",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R1 type=imm expected=fp",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"helper access to variable memory: size = 0 not allowed on != NULL",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid stack type R1 off=-8 access_size=0",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"helper access to variable memory: 8 bytes leak",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid indirect read from stack off -64+32 size 64",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"helper access to variable memory: 8 bytes no leak (init memory)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_EMIT_CALL(BPF_FUNC_probe_read),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
 };

 static int probe_filter_length(const struct bpf_insn *fp)