Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2017-12-21

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix multiple security issues in the BPF verifier, mostly related to
   the value and min/max bounds tracking rework in 4.14. Issues range
   from incorrect bounds calculation in some BPF_RSH cases, to improper
   sign extension and reg size handling on 32 bit ALU ops, missing
   strict alignment checks on stack pointers, and several others that
   got fixed, from Jann, Alexei and Edward.

2) Fix various build failures in BPF selftests on sparc64. More
   specifically, librt needed to be added to the libs to link against,
   and a few format string fixups for sizeof, from David.

3) Fix one last remaining issue from the BPF selftest build that was
   still occurring on s390x, from the asm/bpf_perf_event.h include
   which could not find the asm/ptrace.h copy, from Hendrik.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 8b6ca2bf5a
@@ -15,11 +15,11 @@
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF (1ULL << 31)
+#define BPF_MAX_VAR_OFF (1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ INT_MAX
+#define BPF_MAX_VAR_SIZ (1 << 29)
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
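One way to read the new caps (a standalone sketch, not kernel code, under the assumption that at most a fixed offset, a variable offset and an access size are ever summed before an int conversion): with both limits at 1 << 29, even the worst-case sum stays well below INT_MAX, so the (int) casts in the comment above can no longer overflow.

    #include <limits.h>
    #include <stdio.h>

    #define BPF_MAX_VAR_OFF (1 << 29)
    #define BPF_MAX_VAR_SIZ (1 << 29)

    int main(void)
    {
        /* worst case: variable off + fixed off + size, each < 2^29 */
        long long worst = (long long)BPF_MAX_VAR_OFF + BPF_MAX_VAR_OFF +
                          BPF_MAX_VAR_SIZ;  /* 3 * 2^29 = 1610612736 */

        printf("worst case sum: %lld, INT_MAX: %d\n", worst, INT_MAX);
        return worst < INT_MAX ? 0 : 1;     /* holds: 3 * 2^29 < 2^31 - 1 */
    }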
@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
         break;
     case PTR_TO_STACK:
         pointer_desc = "stack ";
+        /* The stack spill tracking logic in check_stack_write()
+         * and check_stack_read() relies on stack accesses being
+         * aligned.
+         */
+        strict = true;
         break;
     default:
         break;
@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                    strict);
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+    u64 mask;
+
+    /* clear high bits in bit representation */
+    reg->var_off = tnum_cast(reg->var_off, size);
+
+    /* fix arithmetic bounds */
+    mask = ((u64)1 << (size * 8)) - 1;
+    if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+        reg->umin_value &= mask;
+        reg->umax_value &= mask;
+    } else {
+        reg->umin_value = 0;
+        reg->umax_value = mask;
+    }
+    reg->smin_value = reg->umin_value;
+    reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
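A standalone sketch (assumed userspace mock, no tnums) of the bounds half of coerce_reg_to_size(): if umin and umax agree in all the bits being chopped off, truncation is monotonic and both ends can simply be masked; otherwise the truncated value can land anywhere in [0, mask].

    #include <stdint.h>
    #include <stdio.h>

    static void coerce_bounds(uint64_t *umin, uint64_t *umax, int size)
    {
        uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

        if ((*umin & ~mask) == (*umax & ~mask)) {
            *umin &= mask;      /* same upper bits: mask both ends */
            *umax &= mask;
        } else {
            *umin = 0;          /* range crosses a 2^(8*size) boundary */
            *umax = mask;
        }
    }

    int main(void)
    {
        uint64_t a_min = 0x100000010ULL, a_max = 0x100000020ULL;
        uint64_t b_min = 0x0ffffffffULL, b_max = 0x100000001ULL;

        coerce_bounds(&a_min, &a_max, 4);   /* -> [0x10, 0x20] */
        coerce_bounds(&b_min, &b_max, 4);   /* -> [0, 0xffffffff] */
        printf("a: [%#llx, %#llx]  b: [%#llx, %#llx]\n",
               (unsigned long long)a_min, (unsigned long long)a_max,
               (unsigned long long)b_min, (unsigned long long)b_max);
        return 0;
    }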
@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
     if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
         regs[value_regno].type == SCALAR_VALUE) {
         /* b/h/w load zero-extends, mark upper bits as known 0 */
-        regs[value_regno].var_off =
-            tnum_cast(regs[value_regno].var_off, size);
-        __update_reg_bounds(&regs[value_regno]);
+        coerce_reg_to_size(&regs[value_regno], size);
     }
     return err;
 }
@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
         tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
         verbose(env, "invalid variable stack read R%d var_off=%s\n",
             regno, tn_buf);
+        return -EACCES;
     }
     off = regs[regno].off + regs[regno].var_off.value;
     if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1772,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
     return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-    /* clear high 32 bits */
-    reg->var_off = tnum_cast(reg->var_off, 4);
-    /* Update bounds */
-    __update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
     /* Do the add in u64, where overflow is well-defined */
@@ -1800,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
     return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+                                  const struct bpf_reg_state *reg,
+                                  enum bpf_reg_type type)
+{
+    bool known = tnum_is_const(reg->var_off);
+    s64 val = reg->var_off.value;
+    s64 smin = reg->smin_value;
+
+    if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+        verbose(env, "math between %s pointer and %lld is not allowed\n",
+            reg_type_str[type], val);
+        return false;
+    }
+
+    if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+        verbose(env, "%s pointer offset %d is not allowed\n",
+            reg_type_str[type], reg->off);
+        return false;
+    }
+
+    if (smin == S64_MIN) {
+        verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+            reg_type_str[type]);
+        return false;
+    }
+
+    if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+        verbose(env, "value %lld makes %s pointer be out of bounds\n",
+            smin, reg_type_str[type]);
+        return false;
+    }
+
+    return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
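A hypothetical entry in the style of the test_verifier cases further down (not part of this patch) showing the "unbounded min value" path of the new helper: a scalar loaded straight from a map has smin_value == S64_MIN, so adding it to a map value pointer is now rejected instead of being tracked with bogus bounds.

    {
        "hypothetical: map_value += unbounded scalar",
        .insns = {
            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
            BPF_LD_MAP_FD(BPF_REG_1, 0),
            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                         BPF_FUNC_map_lookup_elem),
            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
            /* r1 = unbounded scalar from the map */
            BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
            /* map_value += r1: rejected, smin_value is S64_MIN here */
            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
        },
        .fixup_map1 = { 3 },
        .errstr = "unbounded min value",
        .result = REJECT,
    },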
@@ -1836,29 +1890,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
     if (BPF_CLASS(insn->code) != BPF_ALU64) {
         /* 32-bit ALU ops on pointers produce (meaningless) scalars */
-        if (!env->allow_ptr_leaks)
-            verbose(env,
-                "R%d 32-bit pointer arithmetic prohibited\n",
-                dst);
+        verbose(env,
+            "R%d 32-bit pointer arithmetic prohibited\n",
+            dst);
         return -EACCES;
     }
 
     if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-        if (!env->allow_ptr_leaks)
-            verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-                dst);
+        verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+            dst);
         return -EACCES;
     }
     if (ptr_reg->type == CONST_PTR_TO_MAP) {
-        if (!env->allow_ptr_leaks)
-            verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-                dst);
+        verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+            dst);
         return -EACCES;
     }
     if (ptr_reg->type == PTR_TO_PACKET_END) {
-        if (!env->allow_ptr_leaks)
-            verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-                dst);
+        verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+            dst);
         return -EACCES;
     }
 
@@ -1868,6 +1918,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
     dst_reg->type = ptr_reg->type;
     dst_reg->id = ptr_reg->id;
 
+    if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+        !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+        return -EINVAL;
+
     switch (opcode) {
     case BPF_ADD:
         /* We can take a fixed offset as long as it doesn't overflow
@@ -1921,9 +1975,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
     case BPF_SUB:
         if (dst_reg == off_reg) {
             /* scalar -= pointer.  Creates an unknown scalar */
-            if (!env->allow_ptr_leaks)
-                verbose(env, "R%d tried to subtract pointer from scalar\n",
-                    dst);
+            verbose(env, "R%d tried to subtract pointer from scalar\n",
+                dst);
             return -EACCES;
         }
         /* We don't allow subtraction from FP, because (according to
@@ -1931,9 +1984,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
          * be able to deal with it.
          */
         if (ptr_reg->type == PTR_TO_STACK) {
-            if (!env->allow_ptr_leaks)
-                verbose(env, "R%d subtraction from stack pointer prohibited\n",
-                    dst);
+            verbose(env, "R%d subtraction from stack pointer prohibited\n",
+                dst);
             return -EACCES;
         }
         if (known && (ptr_reg->off - smin_val ==
@@ -1982,28 +2034,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
     case BPF_AND:
     case BPF_OR:
     case BPF_XOR:
-        /* bitwise ops on pointers are troublesome, prohibit for now.
-         * (However, in principle we could allow some cases, e.g.
-         * ptr &= ~3 which would reduce min_value by 3.)
-         */
-        if (!env->allow_ptr_leaks)
-            verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-                dst, bpf_alu_string[opcode >> 4]);
+        /* bitwise ops on pointers are troublesome, prohibit. */
+        verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+            dst, bpf_alu_string[opcode >> 4]);
         return -EACCES;
     default:
         /* other operators (e.g. MUL,LSH) produce non-pointer results */
-        if (!env->allow_ptr_leaks)
-            verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-                dst, bpf_alu_string[opcode >> 4]);
+        verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+            dst, bpf_alu_string[opcode >> 4]);
         return -EACCES;
     }
 
+    if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+        return -EINVAL;
+
     __update_reg_bounds(dst_reg);
     __reg_deduce_bounds(dst_reg);
     __reg_bound_offset(dst_reg);
     return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                       struct bpf_insn *insn,
                                       struct bpf_reg_state *dst_reg,
@@ -2014,12 +2068,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
     bool src_known, dst_known;
     s64 smin_val, smax_val;
     u64 umin_val, umax_val;
+    u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-    if (BPF_CLASS(insn->code) != BPF_ALU64) {
-        /* 32-bit ALU ops are (32,32)->64 */
-        coerce_reg_to_32(dst_reg);
-        coerce_reg_to_32(&src_reg);
-    }
     smin_val = src_reg.smin_value;
     smax_val = src_reg.smax_value;
     umin_val = src_reg.umin_value;
@@ -2027,6 +2077,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
     src_known = tnum_is_const(src_reg.var_off);
     dst_known = tnum_is_const(dst_reg->var_off);
 
+    if (!src_known &&
+        opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+        __mark_reg_unknown(dst_reg);
+        return 0;
+    }
+
     switch (opcode) {
     case BPF_ADD:
         if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2155,9 +2211,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
         __update_reg_bounds(dst_reg);
         break;
     case BPF_LSH:
-        if (umax_val > 63) {
-            /* Shifts greater than 63 are undefined. This includes
-             * shifts by a negative number.
+        if (umax_val >= insn_bitness) {
+            /* Shifts greater than 31 or 63 are undefined.
+             * This includes shifts by a negative number.
              */
             mark_reg_unknown(env, regs, insn->dst_reg);
             break;
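A minimal standalone sketch (userspace demo, not kernel code) of why the verifier gives up on any shift count >= insn_bitness: shifting by the full register width is undefined in C, and hardware differs (x86, for instance, masks the count, so a 32-bit "1 << 32" may yield 1 rather than 0).

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t v = 1;
        volatile uint32_t n = 32;   /* volatile defeats constant folding */

        /* undefined behavior; x86 typically prints 1, other ISAs may not */
        printf("1u << 32 = %u\n", v << n);
        return 0;
    }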
@@ -2183,27 +2239,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
         __update_reg_bounds(dst_reg);
         break;
     case BPF_RSH:
-        if (umax_val > 63) {
-            /* Shifts greater than 63 are undefined. This includes
-             * shifts by a negative number.
+        if (umax_val >= insn_bitness) {
+            /* Shifts greater than 31 or 63 are undefined.
+             * This includes shifts by a negative number.
              */
             mark_reg_unknown(env, regs, insn->dst_reg);
             break;
         }
-        /* BPF_RSH is an unsigned shift, so make the appropriate casts */
-        if (dst_reg->smin_value < 0) {
-            if (umin_val) {
-                /* Sign bit will be cleared */
-                dst_reg->smin_value = 0;
-            } else {
-                /* Lost sign bit information */
-                dst_reg->smin_value = S64_MIN;
-                dst_reg->smax_value = S64_MAX;
-            }
-        } else {
-            dst_reg->smin_value =
-                (u64)(dst_reg->smin_value) >> umax_val;
-        }
+        /* BPF_RSH is an unsigned shift. If the value in dst_reg might
+         * be negative, then either:
+         * 1) src_reg might be zero, so the sign bit of the result is
+         *    unknown, so we lose our signed bounds
+         * 2) it's known negative, thus the unsigned bounds capture the
+         *    signed bounds
+         * 3) the signed bounds cross zero, so they tell us nothing
+         *    about the result
+         * If the value in dst_reg is known nonnegative, then again the
+         * unsigned bounts capture the signed bounds.
+         * Thus, in all cases it suffices to blow away our signed bounds
+         * and rely on inferring new ones from the unsigned bounds and
+         * var_off of the result.
+         */
+        dst_reg->smin_value = S64_MIN;
+        dst_reg->smax_value = S64_MAX;
         if (src_known)
             dst_reg->var_off = tnum_rshift(dst_reg->var_off,
                                            umin_val);
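A standalone sketch (userspace, not kernel code) of case 1 in the new comment: when the shift amount might be zero, an unsigned right shift of a maybe-negative value has an unknown sign bit, so no useful signed bounds survive the operation.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t val = -1;   /* dst_reg might be negative */

        /* shift by 0: result is still -1 (sign bit set) */
        printf("shift 0: %lld\n", (long long)((uint64_t)val >> 0));
        /* shift by 1: result is huge and positive (sign bit clear) */
        printf("shift 1: %lld\n", (long long)((uint64_t)val >> 1));
        return 0;
    }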
@@ -2219,6 +2277,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
         break;
     }
 
+    if (BPF_CLASS(insn->code) != BPF_ALU64) {
+        /* 32-bit ALU ops are (32,32)->32 */
+        coerce_reg_to_size(dst_reg, 4);
+        coerce_reg_to_size(&src_reg, 4);
+    }
+
     __reg_deduce_bounds(dst_reg);
     __reg_bound_offset(dst_reg);
     return 0;
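A standalone sketch (userspace, not kernel code) of what those coerce_reg_to_size(..., 4) calls model: 32-bit BPF ALU ops compute in 32 bits and zero-extend, so the 64-bit bounds tracked during the op must be truncated afterwards.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t a = 0x7fffffff, b = 0x7fffffff;
        uint32_t r32 = (uint32_t)a + (uint32_t)b + 2;   /* wraps to 0 */
        uint64_t r64 = a + b + 2;                       /* 0x100000000 */

        printf("32-bit result: %u, 64-bit result: %#llx\n",
               r32, (unsigned long long)r64);
        return 0;
    }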
@@ -2233,7 +2297,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
     struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
     struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
     u8 opcode = BPF_OP(insn->code);
-    int rc;
 
     dst_reg = &regs[insn->dst_reg];
     src_reg = NULL;
@@ -2244,43 +2307,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
         if (src_reg->type != SCALAR_VALUE) {
             if (dst_reg->type != SCALAR_VALUE) {
                 /* Combining two pointers by any ALU op yields
-                 * an arbitrary scalar.
+                 * an arbitrary scalar. Disallow all math except
+                 * pointer subtraction
                  */
-                if (!env->allow_ptr_leaks) {
-                    verbose(env, "R%d pointer %s pointer prohibited\n",
-                        insn->dst_reg,
-                        bpf_alu_string[opcode >> 4]);
-                    return -EACCES;
+                if (opcode == BPF_SUB){
+                    mark_reg_unknown(env, regs, insn->dst_reg);
+                    return 0;
                 }
-                mark_reg_unknown(env, regs, insn->dst_reg);
-                return 0;
+                verbose(env, "R%d pointer %s pointer prohibited\n",
+                    insn->dst_reg,
+                    bpf_alu_string[opcode >> 4]);
+                return -EACCES;
             } else {
                 /* scalar += pointer
                  * This is legal, but we have to reverse our
                  * src/dest handling in computing the range
                  */
-                rc = adjust_ptr_min_max_vals(env, insn,
-                                             src_reg, dst_reg);
-                if (rc == -EACCES && env->allow_ptr_leaks) {
-                    /* scalar += unknown scalar */
-                    __mark_reg_unknown(&off_reg);
-                    return adjust_scalar_min_max_vals(
-                            env, insn,
-                            dst_reg, off_reg);
-                }
-                return rc;
+                return adjust_ptr_min_max_vals(env, insn,
+                                               src_reg, dst_reg);
             }
         } else if (ptr_reg) {
             /* pointer += scalar */
-            rc = adjust_ptr_min_max_vals(env, insn,
-                                         dst_reg, src_reg);
-            if (rc == -EACCES && env->allow_ptr_leaks) {
-                /* unknown scalar += scalar */
-                __mark_reg_unknown(dst_reg);
-                return adjust_scalar_min_max_vals(
-                        env, insn, dst_reg, *src_reg);
-            }
-            return rc;
+            return adjust_ptr_min_max_vals(env, insn,
+                                           dst_reg, src_reg);
         }
     } else {
         /* Pretend the src is a reg with a known value, since we only
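A hedged sketch in BPF C of what the relaxed pointer-subtraction rule enables (illustrative, compiled with clang -target bpf; names here are assumptions, not from this patch): subtracting two packet pointers now yields a plain unknown scalar, so computing a packet length no longer requires allow_ptr_leaks. The "pkt_end - pkt_start is allowed" selftest further down exercises exactly this.

    #include <linux/bpf.h>

    #ifndef SEC
    #define SEC(name) __attribute__((section(name), used))
    #endif

    SEC("xdp")
    int xdp_pkt_len(struct xdp_md *ctx)
    {
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        /* pointer - pointer: BPF_SUB of two packet pointers, now a scalar */
        long len = data_end - data;

        return len > 0 ? XDP_PASS : XDP_DROP;
    }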
@@ -2289,17 +2338,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
         off_reg.type = SCALAR_VALUE;
         __mark_reg_known(&off_reg, insn->imm);
         src_reg = &off_reg;
-        if (ptr_reg) { /* pointer += K */
-            rc = adjust_ptr_min_max_vals(env, insn,
-                                         ptr_reg, src_reg);
-            if (rc == -EACCES && env->allow_ptr_leaks) {
-                /* unknown scalar += K */
-                __mark_reg_unknown(dst_reg);
-                return adjust_scalar_min_max_vals(
-                        env, insn, dst_reg, off_reg);
-            }
-            return rc;
-        }
+        if (ptr_reg) /* pointer += K */
+            return adjust_ptr_min_max_vals(env, insn,
+                                           ptr_reg, src_reg);
     }
 
     /* Got here implies adding two SCALAR_VALUEs */
@@ -2396,17 +2437,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                 return -EACCES;
             }
             mark_reg_unknown(env, regs, insn->dst_reg);
-            /* high 32 bits are known zero. */
-            regs[insn->dst_reg].var_off = tnum_cast(
-                regs[insn->dst_reg].var_off, 4);
-            __update_reg_bounds(&regs[insn->dst_reg]);
+            coerce_reg_to_size(&regs[insn->dst_reg], 4);
         }
     } else {
         /* case: R = imm
          * remember the value we stored into this reg
          */
         regs[insn->dst_reg].type = SCALAR_VALUE;
-        __mark_reg_known(regs + insn->dst_reg, insn->imm);
+        if (BPF_CLASS(insn->code) == BPF_ALU64) {
+            __mark_reg_known(regs + insn->dst_reg,
+                             insn->imm);
+        } else {
+            __mark_reg_known(regs + insn->dst_reg,
+                             (u32)insn->imm);
+        }
     }
 
 } else if (opcode > BPF_END) {
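A standalone sketch (userspace, not kernel code) of the bug the (u32) cast fixes: insn->imm is a signed 32-bit field, so without the cast a BPF_MOV32_IMM(reg, 0xffffffff) was tracked as the 64-bit constant -1 instead of 4294967295, the value the interpreter actually produces. The "bounds check based on zero-extended MOV" test below relies on this.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t imm = 0xffffffff;   /* as stored in insn->imm: -1 */

        printf("sign-extended: %lld\n", (long long)(int64_t)imm);
        printf("zero-extended: %llu\n", (unsigned long long)(uint32_t)imm);
        return 0;
    }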
@@ -3437,15 +3481,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
             return range_within(rold, rcur) &&
                    tnum_in(rold->var_off, rcur->var_off);
         } else {
-            /* if we knew anything about the old value, we're not
-             * equal, because we can't know anything about the
-             * scalar value of the pointer in the new value.
+            /* We're trying to use a pointer in place of a scalar.
+             * Even if the scalar was unbounded, this could lead to
+             * pointer leaks because scalars are allowed to leak
+             * while pointers are not. We could make this safe in
+             * special cases if root is calling us, but it's
+             * probably not worth the hassle.
              */
-            return rold->umin_value == 0 &&
-                   rold->umax_value == U64_MAX &&
-                   rold->smin_value == S64_MIN &&
-                   rold->smax_value == S64_MAX &&
-                   tnum_is_unknown(rold->var_off);
+            return false;
         }
     case PTR_TO_MAP_VALUE:
         /* If the new min/max/var_off satisfy the old ones and
@@ -2,7 +2,7 @@
 #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
 #define _UAPI__ASM_BPF_PERF_EVENT_H__
 
-#include <asm/ptrace.h>
+#include "ptrace.h"
 
 typedef user_pt_regs bpf_user_pt_regs_t;
 
@@ -11,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
 endif
 
 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf
+LDLIBS += -lcap -lelf -lrt
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
     test_align test_verifier_log test_dev_cgroup
@@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
           info_len != sizeof(struct bpf_map_info) ||
           strcmp((char *)map_infos[i].name, expected_map_name),
           "get-map-info(fd)",
-          "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+          "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
           err, errno,
           map_infos[i].type, BPF_MAP_TYPE_ARRAY,
           info_len, sizeof(struct bpf_map_info),
@@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
           *(int *)prog_infos[i].map_ids != map_infos[i].id ||
           strcmp((char *)prog_infos[i].name, expected_prog_name),
           "get-prog-info(fd)",
-          "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+          "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
           err, errno, i,
           prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
           info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
           memcmp(&prog_info, &prog_infos[i], info_len) ||
           *(int *)prog_info.map_ids != saved_map_id,
           "get-prog-info(next_id->fd)",
-          "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
+          "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
           err, errno, info_len, sizeof(struct bpf_prog_info),
           memcmp(&prog_info, &prog_infos[i], info_len),
           *(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
           memcmp(&map_info, &map_infos[i], info_len) ||
           array_value != array_magic_value,
           "check get-map-info(next_id->fd)",
-          "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+          "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
           err, errno, info_len, sizeof(struct bpf_map_info),
           memcmp(&map_info, &map_infos[i], info_len),
           array_value, array_magic_value);
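An aside on the format strings: %Zu is the old GNU-specific length modifier for size_t; C99 spells it %zu. Either avoids the %lu mismatch with sizeof()'s type that triggered format warnings on some targets. A minimal illustration (assumed standalone, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        /* sizeof() yields size_t; %zu is the portable conversion */
        printf("info_len %u(%zu)\n", 64u, sizeof(long long));
        return 0;
    }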
@@ -422,9 +422,7 @@ static struct bpf_test tests[] = {
             BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
             BPF_EXIT_INSN(),
         },
-        .errstr_unpriv = "R1 subtraction from stack pointer",
-        .result_unpriv = REJECT,
-        .errstr = "R1 invalid mem access",
+        .errstr = "R1 subtraction from stack pointer",
         .result = REJECT,
     },
     {
@@ -606,7 +604,6 @@ static struct bpf_test tests[] = {
         },
         .errstr = "misaligned stack access",
         .result = REJECT,
-        .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
     },
     {
         "invalid map_fd for function call",
@@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {
         },
         .result = REJECT,
         .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
-        .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
     },
     {
         "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {
         },
         .result = REJECT,
         .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
-        .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
     },
     {
         "PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {
             BPF_MOV64_IMM(BPF_REG_0, 0),
             BPF_EXIT_INSN(),
         },
-        .result = ACCEPT,
-        .result_unpriv = REJECT,
         .errstr_unpriv = "R1 pointer += pointer",
+        .result = REJECT,
+        .errstr = "R1 pointer += pointer",
     },
     {
         "unpriv: neg pointer",
@@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {
             BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                         offsetof(struct __sk_buff, data)),
             BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-            BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+            BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                        offsetof(struct __sk_buff, len)),
             BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
             BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
             BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {
             BPF_MOV64_IMM(BPF_REG_0, 0),
             BPF_EXIT_INSN(),
         },
-        .errstr = "invalid access to packet",
+        .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_SCHED_CLS,
     },
@@ -3885,9 +3880,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map2 = { 3, 11 },
-        .errstr_unpriv = "R0 pointer += pointer",
-        .errstr = "R0 invalid mem access 'inv'",
-        .result_unpriv = REJECT,
+        .errstr = "R0 pointer += pointer",
         .result = REJECT,
         .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
     },
@@ -3928,7 +3921,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 4 },
-        .errstr = "R4 invalid mem access",
+        .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_SCHED_CLS
     },
@@ -3949,7 +3942,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 4 },
-        .errstr = "R4 invalid mem access",
+        .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_SCHED_CLS
     },
@@ -3970,7 +3963,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 4 },
-        .errstr = "R4 invalid mem access",
+        .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS
    },
@@ -5195,10 +5188,8 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map2 = { 3 },
-        .errstr_unpriv = "R0 bitwise operator &= on pointer",
-        .errstr = "invalid mem access 'inv'",
+        .errstr = "R0 bitwise operator &= on pointer",
         .result = REJECT,
-        .result_unpriv = REJECT,
     },
     {
         "map element value illegal alu op, 2",
@@ -5214,10 +5205,8 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map2 = { 3 },
-        .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
-        .errstr = "invalid mem access 'inv'",
+        .errstr = "R0 32-bit pointer arithmetic prohibited",
         .result = REJECT,
-        .result_unpriv = REJECT,
     },
     {
         "map element value illegal alu op, 3",
@@ -5233,10 +5222,8 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map2 = { 3 },
-        .errstr_unpriv = "R0 pointer arithmetic with /= operator",
-        .errstr = "invalid mem access 'inv'",
+        .errstr = "R0 pointer arithmetic with /= operator",
         .result = REJECT,
-        .result_unpriv = REJECT,
     },
     {
         "map element value illegal alu op, 4",
@@ -6019,8 +6006,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map_in_map = { 3 },
-        .errstr = "R1 type=inv expected=map_ptr",
-        .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+        .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
         .result = REJECT,
     },
     {
@@ -6324,7 +6310,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6348,7 +6334,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6374,7 +6360,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R8 invalid mem access 'inv'",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6399,7 +6385,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R8 invalid mem access 'inv'",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6447,7 +6433,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6518,7 +6504,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6569,7 +6555,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6596,7 +6582,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6622,7 +6608,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6651,7 +6637,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6681,7 +6667,7 @@ static struct bpf_test tests[] = {
             BPF_JMP_IMM(BPF_JA, 0, 0, -7),
         },
         .fixup_map1 = { 4 },
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
     },
     {
@@ -6709,8 +6695,7 @@ static struct bpf_test tests[] = {
             BPF_EXIT_INSN(),
         },
         .fixup_map1 = { 3 },
         .errstr_unpriv = "R0 pointer comparison prohibited",
-        .errstr = "R0 min value is negative",
+        .errstr = "unbounded min value",
         .result = REJECT,
         .result_unpriv = REJECT,
     },
@@ -6765,6 +6750,462 @@ static struct bpf_test tests[] = {
         .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
         .result = REJECT,
     },
+    {
+        "bounds check based on zero-extended MOV",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+            /* r2 = 0x0000'0000'ffff'ffff */
+            BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+            /* r2 = 0 */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+            /* no-op */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+            /* access at offset 0 */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .result = ACCEPT
+    },
+    {
+        "bounds check based on sign-extended MOV. test1",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+            /* r2 = 0xffff'ffff'ffff'ffff */
+            BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+            /* r2 = 0xffff'ffff */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+            /* r0 = <oob pointer> */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+            /* access to OOB pointer */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "map_value pointer and 4294967295",
+        .result = REJECT
+    },
+    {
+        "bounds check based on sign-extended MOV. test2",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+            /* r2 = 0xffff'ffff'ffff'ffff */
+            BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+            /* r2 = 0xfff'ffff */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+            /* r0 = <oob pointer> */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+            /* access to OOB pointer */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "R0 min value is outside of the array range",
+        .result = REJECT
+    },
+    {
+        "bounds check based on reg_off + var_off + insn_off. test1",
+        .insns = {
+            BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                        offsetof(struct __sk_buff, mark)),
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+            BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 4 },
+        .errstr = "value_size=8 off=1073741825",
+        .result = REJECT,
+        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+    },
+    {
+        "bounds check based on reg_off + var_off + insn_off. test2",
+        .insns = {
+            BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                        offsetof(struct __sk_buff, mark)),
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+            BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 4 },
+        .errstr = "value 1073741823",
+        .result = REJECT,
+        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+    },
+    {
+        "bounds check after truncation of non-boundary-crossing range",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+            /* r1 = [0x00, 0xff] */
+            BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+            BPF_MOV64_IMM(BPF_REG_2, 1),
+            /* r2 = 0x10'0000'0000 */
+            BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+            /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+            /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+            /* r1 = [0x00, 0xff] */
+            BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+            /* r1 = 0 */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+            /* no-op */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+            /* access at offset 0 */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .result = ACCEPT
+    },
+    {
+        "bounds check after truncation of boundary-crossing range (1)",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+            /* r1 = [0x00, 0xff] */
+            BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = [0xffff'ff80, 0xffff'ffff] or
+             * [0x0000'0000, 0x0000'007f]
+             */
+            BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = [0x00, 0xff] or
+             * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+             */
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = 0 or
+             * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+             */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+            /* no-op or OOB pointer computation */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+            /* potentially OOB access */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        /* not actually fully unbounded, but the bound is very high */
+        .errstr = "R0 unbounded memory access",
+        .result = REJECT
+    },
+    {
+        "bounds check after truncation of boundary-crossing range (2)",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+            /* r1 = [0x00, 0xff] */
+            BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = [0xffff'ff80, 0xffff'ffff] or
+             * [0x0000'0000, 0x0000'007f]
+             * difference to previous test: truncation via MOV32
+             * instead of ALU32.
+             */
+            BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = [0x00, 0xff] or
+             * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+             */
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+            /* r1 = 0 or
+             * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+             */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+            /* no-op or OOB pointer computation */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+            /* potentially OOB access */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        /* not actually fully unbounded, but the bound is very high */
+        .errstr = "R0 unbounded memory access",
+        .result = REJECT
+    },
+    {
+        "bounds check after wrapping 32-bit addition",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+            /* r1 = 0x7fff'ffff */
+            BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+            /* r1 = 0xffff'fffe */
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+            /* r1 = 0 */
+            BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+            /* no-op */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+            /* access at offset 0 */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .result = ACCEPT
+    },
+    {
+        "bounds check after shift with oversized count operand",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+            BPF_MOV64_IMM(BPF_REG_2, 32),
+            BPF_MOV64_IMM(BPF_REG_1, 1),
+            /* r1 = (u32)1 << (u32)32 = ? */
+            BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+            /* r1 = [0x0000, 0xffff] */
+            BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+            /* computes unknown pointer, potentially OOB */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+            /* potentially OOB access */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "R0 max value is outside of the array range",
+        .result = REJECT
+    },
+    {
+        "bounds check after right shift of maybe-negative number",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+            /* r1 = [0x00, 0xff] */
+            BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+            /* r1 = [-0x01, 0xfe] */
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+            /* r1 = 0 or 0xff'ffff'ffff'ffff */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+            /* r1 = 0 or 0xffff'ffff'ffff */
+            BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+            /* computes unknown pointer, potentially OOB */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+            /* potentially OOB access */
+            BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+            /* exit */
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "R0 unbounded memory access",
+        .result = REJECT
+    },
+    {
+        "bounds check map access with off+size signed 32bit overflow. test1",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+            BPF_EXIT_INSN(),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+            BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+            BPF_JMP_A(0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "map_value pointer and 2147483646",
+        .result = REJECT
+    },
+    {
+        "bounds check map access with off+size signed 32bit overflow. test2",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+            BPF_EXIT_INSN(),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+            BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+            BPF_JMP_A(0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "pointer offset 1073741822",
+        .result = REJECT
+    },
+    {
+        "bounds check map access with off+size signed 32bit overflow. test3",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+            BPF_EXIT_INSN(),
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+            BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+            BPF_JMP_A(0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "pointer offset -1073741822",
+        .result = REJECT
+    },
+    {
+        "bounds check map access with off+size signed 32bit overflow. test4",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+            BPF_EXIT_INSN(),
+            BPF_MOV64_IMM(BPF_REG_1, 1000000),
+            BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+            BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+            BPF_JMP_A(0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .errstr = "map_value pointer and 1000000000000",
+        .result = REJECT
+    },
+    {
+        "pointer/scalar confusion in state equality check (way 1)",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+            BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+            BPF_JMP_A(1),
+            BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+            BPF_JMP_A(0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .result = ACCEPT,
+        .result_unpriv = REJECT,
+        .errstr_unpriv = "R0 leaks addr as return value"
+    },
+    {
+        "pointer/scalar confusion in state equality check (way 2)",
+        .insns = {
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+            BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+            BPF_JMP_A(1),
+            BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 3 },
+        .result = ACCEPT,
+        .result_unpriv = REJECT,
+        .errstr_unpriv = "R0 leaks addr as return value"
+    },
     {
         "variable-offset ctx access",
         .insns = {
@@ -6806,6 +7247,71 @@ static struct bpf_test tests[] = {
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_LWT_IN,
     },
+    {
+        "indirect variable-offset stack access",
+        .insns = {
+            /* Fill the top 8 bytes of the stack */
+            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+            /* Get an unknown value */
+            BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+            /* Make it small and 4-byte aligned */
+            BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+            BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+            /* add it to fp. We now have either fp-4 or fp-8, but
+             * we don't know which
+             */
+            BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+            /* dereference it indirectly */
+            BPF_LD_MAP_FD(BPF_REG_1, 0),
+            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                         BPF_FUNC_map_lookup_elem),
+            BPF_MOV64_IMM(BPF_REG_0, 0),
+            BPF_EXIT_INSN(),
+        },
+        .fixup_map1 = { 5 },
+        .errstr = "variable stack read R2",
+        .result = REJECT,
+        .prog_type = BPF_PROG_TYPE_LWT_IN,
+    },
+    {
+        "direct stack access with 32-bit wraparound. test1",
+        .insns = {
+            BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+            BPF_MOV32_IMM(BPF_REG_0, 0),
+            BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+            BPF_EXIT_INSN()
+        },
+        .errstr = "fp pointer and 2147483647",
+        .result = REJECT
+    },
+    {
+        "direct stack access with 32-bit wraparound. test2",
+        .insns = {
+            BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+            BPF_MOV32_IMM(BPF_REG_0, 0),
+            BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+            BPF_EXIT_INSN()
+        },
+        .errstr = "fp pointer and 1073741823",
+        .result = REJECT
+    },
+    {
+        "direct stack access with 32-bit wraparound. test3",
+        .insns = {
+            BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+            BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+            BPF_MOV32_IMM(BPF_REG_0, 0),
+            BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+            BPF_EXIT_INSN()
+        },
+        .errstr = "fp pointer offset 1073741822",
+        .result = REJECT
+    },
     {
         "liveness pruning and write screening",
         .insns = {
@@ -7127,6 +7633,19 @@ static struct bpf_test tests[] = {
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_SCHED_CLS,
     },
+    {
+        "pkt_end - pkt_start is allowed",
+        .insns = {
+            BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                        offsetof(struct __sk_buff, data_end)),
+            BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                        offsetof(struct __sk_buff, data)),
+            BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+            BPF_EXIT_INSN(),
+        },
+        .result = ACCEPT,
+        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+    },
     {
         "XDP pkt read, pkt_end mangling, bad access 1",
         .insns = {
@@ -7142,7 +7661,7 @@ static struct bpf_test tests[] = {
             BPF_MOV64_IMM(BPF_REG_0, 0),
             BPF_EXIT_INSN(),
         },
-        .errstr = "R1 offset is outside of the packet",
+        .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_XDP,
     },
@@ -7161,7 +7680,7 @@ static struct bpf_test tests[] = {
             BPF_MOV64_IMM(BPF_REG_0, 0),
             BPF_EXIT_INSN(),
         },
-        .errstr = "R1 offset is outside of the packet",
+        .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
         .result = REJECT,
         .prog_type = BPF_PROG_TYPE_XDP,
     },
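A hedged sketch in BPF C of the rule these two tests now exercise (illustrative, not part of the patch): the verifier rejects any arithmetic on pkt_end itself, so bounds checks must offset the data pointer and compare it against an untouched data_end.

    #include <linux/bpf.h>

    #ifndef SEC
    #define SEC(name) __attribute__((section(name), used))
    #endif

    SEC("xdp")
    int xdp_check(struct xdp_md *ctx)
    {
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        /* data_end += 8 would be "pointer arithmetic on PTR_TO_PACKET_END";
         * offsetting data instead is accepted:
         */
        if (data + 8 > data_end)
            return XDP_DROP;
        return XDP_PASS;
    }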