Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2019-06-15

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) fix stack layout of JITed x64 bpf code, from Alexei.

2) fix out of bounds memory access in bpf_sk_storage, from Arthur.

3) fix lpm trie walk, from Jonathan.

4) fix nested bpf_perf_event_output, from Matt.

5) and several other fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1eb4169c1e
@@ -338,6 +338,7 @@
 #define PPC_INST_MADDLD			0x10000033
 #define PPC_INST_DIVWU			0x7c000396
 #define PPC_INST_DIVD			0x7c0003d2
+#define PPC_INST_DIVDU			0x7c000392
 #define PPC_INST_RLWINM			0x54000000
 #define PPC_INST_RLWINM_DOT		0x54000001
 #define PPC_INST_RLWIMI			0x50000000

@@ -116,7 +116,7 @@
 				     ___PPC_RA(a) | IMM_L(i))
 #define PPC_DIVWU(d, a, b)	EMIT(PPC_INST_DIVWU | ___PPC_RT(d) |	\
 				     ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_DIVD(d, a, b)	EMIT(PPC_INST_DIVD | ___PPC_RT(d) |	\
+#define PPC_DIVDU(d, a, b)	EMIT(PPC_INST_DIVDU | ___PPC_RT(d) |	\
 				     ___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_AND(d, a, b)	EMIT(PPC_INST_AND | ___PPC_RA(d) |	\
 				     ___PPC_RS(a) | ___PPC_RB(b))

@@ -399,12 +399,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
 			if (BPF_OP(code) == BPF_MOD) {
-				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
+				PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
 				PPC_MULD(b2p[TMP_REG_1], src_reg,
 						b2p[TMP_REG_1]);
 				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
 			} else
-				PPC_DIVD(dst_reg, dst_reg, src_reg);
+				PPC_DIVDU(dst_reg, dst_reg, src_reg);
 			break;
 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */

@@ -432,7 +432,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 				break;
 			case BPF_ALU64:
 				if (BPF_OP(code) == BPF_MOD) {
-					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
+					PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
 							b2p[TMP_REG_1]);
 					PPC_MULD(b2p[TMP_REG_1],
 							b2p[TMP_REG_1],

@@ -440,7 +440,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 					PPC_SUB(dst_reg, dst_reg,
 							b2p[TMP_REG_1]);
 				} else
-					PPC_DIVD(dst_reg, dst_reg,
+					PPC_DIVDU(dst_reg, dst_reg,
 							b2p[TMP_REG_1]);
 				break;
 			}

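Background on why divd is the wrong instruction above: the eBPF ISA defines BPF_DIV and BPF_MOD as unsigned operations, while PowerPC's divd divides signed doublewords; divdu is the unsigned variant. A minimal user-space C sketch (ours, not part of the patch) of the semantic difference the DIVD -> DIVDU change addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t dst = -16;      /* 0xfffffffffffffff0 */
        int64_t src = 4;

        /* signed division, what divd computes: -4 */
        printf("divd : %lld\n", (long long)(dst / src));

        /* unsigned division, what divdu computes and what
         * BPF_ALU64 | BPF_DIV requires: a very large quotient */
        printf("divdu: %llu\n",
               (unsigned long long)((uint64_t)dst / (uint64_t)src));
        return 0;
}
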
@@ -190,9 +190,7 @@ struct jit_context {
 #define BPF_MAX_INSN_SIZE	128
 #define BPF_INSN_SAFETY		64
 
-#define AUX_STACK_SPACE		40 /* Space for RBX, R13, R14, R15, tailcnt */
-
-#define PROLOGUE_SIZE		37
+#define PROLOGUE_SIZE		20
 
 /*
  * Emit x86-64 prologue code for BPF program and check its size.

@@ -203,44 +201,19 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
 	u8 *prog = *pprog;
 	int cnt = 0;
 
-	/* push rbp */
-	EMIT1(0x55);
-
-	/* mov rbp,rsp */
-	EMIT3(0x48, 0x89, 0xE5);
-
-	/* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
-	EMIT3_off32(0x48, 0x81, 0xEC,
-		    round_up(stack_depth, 8) + AUX_STACK_SPACE);
-
-	/* sub rbp, AUX_STACK_SPACE */
-	EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);
-
-	/* mov qword ptr [rbp+0],rbx */
-	EMIT4(0x48, 0x89, 0x5D, 0);
-	/* mov qword ptr [rbp+8],r13 */
-	EMIT4(0x4C, 0x89, 0x6D, 8);
-	/* mov qword ptr [rbp+16],r14 */
-	EMIT4(0x4C, 0x89, 0x75, 16);
-	/* mov qword ptr [rbp+24],r15 */
-	EMIT4(0x4C, 0x89, 0x7D, 24);
-
+	EMIT1(0x55);             /* push rbp */
+	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+	/* sub rsp, rounded_stack_depth */
+	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
+	EMIT1(0x53);             /* push rbx */
+	EMIT2(0x41, 0x55);       /* push r13 */
+	EMIT2(0x41, 0x56);       /* push r14 */
+	EMIT2(0x41, 0x57);       /* push r15 */
 	if (!ebpf_from_cbpf) {
-		/*
-		 * Clear the tail call counter (tail_call_cnt): for eBPF tail
-		 * calls we need to reset the counter to 0. It's done in two
-		 * instructions, resetting RAX register to 0, and moving it
-		 * to the counter location.
-		 */
-
-		/* xor eax, eax */
-		EMIT2(0x31, 0xc0);
-		/* mov qword ptr [rbp+32], rax */
-		EMIT4(0x48, 0x89, 0x45, 32);
-
+		/* zero init tail_call_cnt */
+		EMIT2(0x6a, 0x00);
 		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
 	}
 
 	*pprog = prog;
 }

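For orientation (our annotation, not part of the commit): the fix drops the 40-byte AUX_STACK_SPACE area that callee-saved registers and tail_call_cnt used to occupy above the BPF stack, and instead pushes rbx/r13/r14/r15 plus an immediate 0 for tail_call_cnt; the epilogue further down pops them in reverse order. The new PROLOGUE_SIZE of 20 can be verified by adding up the instruction encodings, as in this user-space sketch (ours), which mirrors how the EMIT macros count bytes in cnt:

#include <stdio.h>

int main(void)
{
        int cnt = 0;

        cnt += 1;       /* 55: push rbp */
        cnt += 3;       /* 48 89 e5: mov rbp, rsp */
        cnt += 3 + 4;   /* 48 81 ec imm32: sub rsp, imm32 */
        cnt += 1;       /* 53: push rbx */
        cnt += 2 * 3;   /* 41 55 / 41 56 / 41 57: push r13, r14, r15 */
        cnt += 2;       /* 6a 00: push 0, the new tail_call_cnt slot */
        printf("PROLOGUE_SIZE = %d\n", cnt);    /* prints 20 */
        return 0;
}
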
@@ -285,13 +258,13 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 	 *	goto out;
 	 */
-	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
+	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
 #define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 	label2 = cnt;
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
-	EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */
+	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */
 
 	/* prog = array->ptrs[index]; */
 	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */

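The -36 - MAX_BPF_STACK displacement follows from the new frame layout: with MAX_BPF_STACK = 512 bytes of BPF stack below rbp and four 8-byte register saves below that, the 4-byte tail_call_cnt slot sits 548 bytes under rbp, matching the "[rbp - 548]" comments. A trivial check of that arithmetic (ours):

#include <assert.h>

#define MAX_BPF_STACK 512

int main(void)
{
        /* four callee-saved registers of 8 bytes each sit between the
         * BPF stack and tail_call_cnt, accessed as a 4-byte slot */
        assert(4 * 8 + 4 == 36);
        assert(36 + MAX_BPF_STACK == 548);      /* "[rbp - 548]" */
        return 0;
}
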
@@ -1040,17 +1013,12 @@ emit_jmp:
 			seen_exit = true;
 			/* Update cleanup_addr */
 			ctx->cleanup_addr = proglen;
-			/* mov rbx, qword ptr [rbp+0] */
-			EMIT4(0x48, 0x8B, 0x5D, 0);
-			/* mov r13, qword ptr [rbp+8] */
-			EMIT4(0x4C, 0x8B, 0x6D, 8);
-			/* mov r14, qword ptr [rbp+16] */
-			EMIT4(0x4C, 0x8B, 0x75, 16);
-			/* mov r15, qword ptr [rbp+24] */
-			EMIT4(0x4C, 0x8B, 0x7D, 24);
-
-			/* add rbp, AUX_STACK_SPACE */
-			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
+			if (!bpf_prog_was_classic(bpf_prog))
+				EMIT1(0x5B); /* get rid of tail_call_cnt */
+			EMIT2(0x41, 0x5F);        /* pop r15 */
+			EMIT2(0x41, 0x5E);        /* pop r14 */
+			EMIT2(0x41, 0x5D);        /* pop r13 */
+			EMIT1(0x5B);              /* pop rbx */
 			EMIT1(0xC9);              /* leave */
 			EMIT1(0xC3);              /* ret */
 			break;

@@ -3378,8 +3378,8 @@ struct bpf_raw_tracepoint_args {
 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
  * OUTPUT:  Do lookup from egress perspective; default is ingress
  */
-#define BPF_FIB_LOOKUP_DIRECT  BIT(0)
-#define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
+#define BPF_FIB_LOOKUP_DIRECT  (1U << 0)
+#define BPF_FIB_LOOKUP_OUTPUT  (1U << 1)
 
 enum {
 	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */

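BIT() is a kernel-internal helper (from linux/bits.h) that userspace does not have, so a UAPI header must spell the masks out; the same hunk appears a second time further below, presumably for the tools/ mirror of the header. A user-space C sketch (ours) showing the open-coded flags compile with no kernel includes:

#include <stdio.h>

#define BPF_FIB_LOOKUP_DIRECT  (1U << 0)   /* expands with no kernel deps */
#define BPF_FIB_LOOKUP_OUTPUT  (1U << 1)

int main(void)
{
        unsigned int flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT;

        printf("flags = 0x%x\n", flags);        /* 0x3 */
        return 0;
}
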
@@ -186,6 +186,7 @@ static void dev_map_free(struct bpf_map *map)
 		if (!dev)
 			continue;
 
+		free_percpu(dev->bulkq);
 		dev_put(dev->dev);
 		kfree(dev);
 	}

@@ -281,6 +282,7 @@ void __dev_map_flush(struct bpf_map *map)
 	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
 	u32 bit;
 
+	rcu_read_lock();
 	for_each_set_bit(bit, bitmap, map->max_entries) {
 		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
 		struct xdp_bulk_queue *bq;

@@ -291,11 +293,12 @@ void __dev_map_flush(struct bpf_map *map)
 		if (unlikely(!dev))
 			continue;
 
+		__clear_bit(bit, bitmap);
+
 		bq = this_cpu_ptr(dev->bulkq);
 		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
-
-		__clear_bit(bit, bitmap);
 	}
+	rcu_read_unlock();
 }
 
 /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or

@@ -388,6 +391,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 
 		int cpu;
 
+		rcu_read_lock();
 		for_each_online_cpu(cpu) {
 			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
 			__clear_bit(dev->bit, bitmap);

@@ -395,6 +399,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			bq = per_cpu_ptr(dev->bulkq, cpu);
 			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
+		rcu_read_unlock();
 	}
 }
 
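Taken together, the devmap hunks add the missing rcu_read_lock()/rcu_read_unlock() around both flush walks, free the per-cpu bulk queue in dev_map_free() to plug a leak, and clear each pending bit before draining the queue rather than after. A heavily simplified user-space sketch (ours) of the reordered flush loop:

#include <stdio.h>

#define MAX_ENTRIES 64
static unsigned long long flush_needed; /* one pending bit per map slot */

static void bq_xmit_all(int slot)
{
        printf("drain bulk queue for slot %d\n", slot);
}

static void dev_map_flush(void)
{
        /* in the kernel this walk now runs under rcu_read_lock() */
        for (int bit = 0; bit < MAX_ENTRIES; bit++) {
                if (!(flush_needed & (1ULL << bit)))
                        continue;
                flush_needed &= ~(1ULL << bit); /* clear first ... */
                bq_xmit_all(bit);               /* ... then drain */
        }
}

int main(void)
{
        flush_needed = 1ULL << 3;
        dev_map_flush();
        return 0;
}
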
@@ -716,9 +716,14 @@ find_leftmost:
 	 * have exact two children, so this function will never return NULL.
 	 */
 	for (node = search_root; node;) {
-		if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
-			next_node = node;
-		node = rcu_dereference(node->child[0]);
+		if (node->flags & LPM_TREE_NODE_FLAG_IM) {
+			node = rcu_dereference(node->child[0]);
+		} else {
+			next_node = node;
+			node = rcu_dereference(node->child[0]);
+			if (!node)
+				node = rcu_dereference(next_node->child[1]);
+		}
 	}
 do_copy:
 	next_key->prefixlen = next_node->prefixlen;

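The walk now handles a real (non-intermediate) node whose left child is NULL by continuing into its right subtree instead of stopping, since in post-order the next key may live there. A self-contained user-space sketch (ours; struct node and the helpers are our stand-ins) of the corrected descent:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
        bool im;                /* intermediate: carries no key of its own */
        int prefixlen;
        struct node *child[2];
};

static struct node *find_leftmost(struct node *search_root)
{
        struct node *node, *next_node = NULL;

        for (node = search_root; node;) {
                if (node->im) {
                        node = node->child[0];
                } else {
                        next_node = node;
                        node = node->child[0];
                        if (!node)      /* the case the old walk missed */
                                node = next_node->child[1];
                }
        }
        return next_node;
}

int main(void)
{
        /* a real /24 node with no left child but a /28 in its right
         * subtree: post-order must yield the /28 first */
        struct node leaf28 = { .im = false, .prefixlen = 28 };
        struct node real24 = { .im = false, .prefixlen = 24,
                               .child = { NULL, &leaf28 } };

        printf("next prefixlen: %d\n", find_leftmost(&real24)->prefixlen);
        return 0;
}
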
@@ -410,8 +410,6 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 	.arg4_type	= ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
-
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 			u64 flags, struct perf_sample_data *sd)

@@ -442,24 +440,50 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	return perf_event_output(event, sd, regs);
 }
 
+/*
+ * Support executing tracepoints in normal, irq, and nmi context that each call
+ * bpf_perf_event_output
+ */
+struct bpf_trace_sample_data {
+	struct perf_sample_data sds[3];
+};
+
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
+static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	   u64, flags, void *, data, u64, size)
 {
-	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
+	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
+	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 	struct perf_raw_record raw = {
 		.frag = {
 			.size = size,
 			.data = data,
 		},
 	};
+	struct perf_sample_data *sd;
+	int err;
 
-	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
-		return -EINVAL;
+	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	sd = &sds->sds[nest_level - 1];
+
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	perf_sample_data_init(sd, 0, 0);
 	sd->raw = &raw;
 
-	return __bpf_perf_event_output(regs, map, flags, sd);
+	err = __bpf_perf_event_output(regs, map, flags, sd);
+
+out:
+	this_cpu_dec(bpf_trace_nest_level);
+	return err;
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {

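The single per-cpu perf_sample_data is replaced by a small per-cpu array indexed by a nesting counter, so a BPF program firing in irq or nmi context while another invocation is in flight on the same CPU gets its own scratch buffer instead of clobbering the outer one. A user-space sketch (ours; all names are our stand-ins) of the pattern:

#include <stdio.h>

#define MAX_NEST 3
struct sample { int data; };

static struct sample sds[MAX_NEST];     /* per-CPU in the kernel */
static int nest_level;                  /* likewise per-CPU */

static struct sample *get_sample(void)
{
        if (++nest_level > MAX_NEST) {
                nest_level--;
                return NULL;            /* -EBUSY in the kernel */
        }
        return &sds[nest_level - 1];
}

static void put_sample(void)
{
        nest_level--;
}

int main(void)
{
        struct sample *outer = get_sample();
        struct sample *inner = get_sample();    /* nested "irq" user */

        outer->data = 1;
        inner->data = 2;        /* does not clobber outer->data */
        printf("%d %d\n", outer->data, inner->data);
        put_sample();
        put_sample();
        return 0;
}
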
@@ -822,16 +846,48 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 /*
  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
  * to avoid potential recursive reuse issue when/if tracepoints are added
- * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
+ *
+ * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
+ * in normal, irq, and nmi context.
  */
-static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
+struct bpf_raw_tp_regs {
+	struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
+static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
+static struct pt_regs *get_bpf_raw_tp_regs(void)
+{
+	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
+
+	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+		this_cpu_dec(bpf_raw_tp_nest_level);
+		return ERR_PTR(-EBUSY);
+	}
+
+	return &tp_regs->regs[nest_level - 1];
+}
+
+static void put_bpf_raw_tp_regs(void)
+{
+	this_cpu_dec(bpf_raw_tp_nest_level);
+}
+
 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
 {
-	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	struct pt_regs *regs = get_bpf_raw_tp_regs();
+	int ret;
+
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	perf_fetch_caller_regs(regs);
-	return ____bpf_perf_event_output(regs, map, flags, data, size);
+	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
+
+	put_bpf_raw_tp_regs();
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {

@@ -848,12 +904,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   struct bpf_map *, map, u64, flags)
 {
-	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	struct pt_regs *regs = get_bpf_raw_tp_regs();
+	int ret;
+
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	perf_fetch_caller_regs(regs);
 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
-	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
-			       flags, 0, 0);
+	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+			      flags, 0, 0);
+	put_bpf_raw_tp_regs();
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {

@@ -868,11 +930,17 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   void *, buf, u32, size, u64, flags)
 {
-	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	struct pt_regs *regs = get_bpf_raw_tp_regs();
+	int ret;
+
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	perf_fetch_caller_regs(regs);
-	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
-			     (unsigned long) size, flags, 0);
+	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+			    (unsigned long) size, flags, 0);
+	put_bpf_raw_tp_regs();
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {

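get_bpf_raw_tp_regs() reports exhaustion via the kernel's ERR_PTR convention, and all three raw-tracepoint helpers now check IS_ERR() before use and call put_bpf_raw_tp_regs() on every success path. A minimal user-space imitation (ours) of that convention:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* a small negative errno encoded in the pointer itself */
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
        void *regs = ERR_PTR(-16);      /* -EBUSY */

        if (IS_ERR(regs))
                printf("error: %ld\n", PTR_ERR(regs));
        return 0;
}
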
@@ -633,7 +633,8 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 	bpf_map_init_from_attr(&smap->map, attr);
 
-	smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
+	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
+	smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
 	nbuckets = 1U << smap->bucket_log;
 	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
 				 GFP_USER | __GFP_NOWARN);

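On a machine with a single possible CPU, ilog2(roundup_pow_of_two(1)) is 0, so the map got exactly one bucket and the bucket-selection hash over bucket_log bits could index out of bounds; clamping bucket_log to at least 1 guarantees two buckets. A small C illustration (ours, with my_ilog2 as a stand-in) of the arithmetic:

#include <stdio.h>

static unsigned int my_ilog2(unsigned int v)    /* floor(log2(v)) */
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        unsigned int ncpus = 1;
        unsigned int old_log = my_ilog2(ncpus);                  /* 0 */
        unsigned int new_log = max_t(unsigned int, 1, old_log);  /* 1 */

        printf("buckets: old=%u new=%u\n", 1U << old_log, 1U << new_log);
        return 0;
}
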
@@ -1850,6 +1850,9 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 			goto out;
 		}
 		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
+#ifdef CONFIG_BPF_SYSCALL
+		RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
+#endif
 
 		newsk->sk_err	   = 0;
 		newsk->sk_err_soft = 0;

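sk_clone_lock() copies the parent socket wholesale, so without this hunk the child would inherit the parent's sk_bpf_storage pointer and both sockets would eventually release the same storage. The bug in miniature (our user-space sketch):

#include <stdlib.h>

struct sock_like {
        int err;
        void *bpf_storage;      /* stands in for sk_bpf_storage */
};

int main(void)
{
        struct sock_like parent = { .bpf_storage = malloc(64) };
        struct sock_like child = parent;        /* memcpy-style clone */

        child.bpf_storage = NULL;       /* the fix: child starts empty */

        free(parent.bpf_storage);
        free(child.bpf_storage);        /* safe: free(NULL) is a no-op */
        return 0;
}
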
@@ -143,6 +143,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
 	struct netdev_bpf bpf;
 	int err;
 
+	if (!umem->dev)
+		return;
+
 	if (umem->zc) {
 		bpf.command = XDP_SETUP_XSK_UMEM;
 		bpf.xsk.umem = NULL;

@@ -156,11 +159,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
 			WARN(1, "failed to disable umem!\n");
 	}
 
-	if (umem->dev) {
-		rtnl_lock();
-		xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
-		rtnl_unlock();
-	}
+	rtnl_lock();
+	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
+	rtnl_unlock();
 
 	if (umem->zc) {
 		dev_put(umem->dev);

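The two xdp_umem hunks replace a per-branch umem->dev test with a single guard clause at the top of xdp_umem_clear_dev(), so no later teardown step can run against a umem that was never bound to a device. Sketch (ours; the struct and names are our stand-ins) of the guard-clause shape:

#include <stddef.h>

struct umem_like {
        void *dev;      /* stands in for umem->dev */
        int zc;
};

static void xdp_umem_clear_dev_like(struct umem_like *umem)
{
        if (!umem->dev)
                return; /* never bound to a device: nothing to undo */

        /* every later step may now assume umem->dev != NULL */
}

int main(void)
{
        struct umem_like u = { .dev = NULL, .zc = 1 };

        xdp_umem_clear_dev_like(&u);    /* safe no-op */
        return 0;
}
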
@@ -3378,8 +3378,8 @@ struct bpf_raw_tracepoint_args {
 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
  * OUTPUT:  Do lookup from egress perspective; default is ingress
  */
-#define BPF_FIB_LOOKUP_DIRECT  BIT(0)
-#define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
+#define BPF_FIB_LOOKUP_DIRECT  (1U << 0)
+#define BPF_FIB_LOOKUP_OUTPUT  (1U << 1)
 
 enum {
 	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */

@@ -573,13 +573,13 @@ static void test_lpm_get_next_key(void)
 
 	/* add one more element (total two) */
 	key_p->prefixlen = 24;
-	inet_pton(AF_INET, "192.168.0.0", key_p->data);
+	inet_pton(AF_INET, "192.168.128.0", key_p->data);
 	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
 
 	memset(key_p, 0, key_size);
 	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
 	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
-	       key_p->data[1] == 168 && key_p->data[2] == 0);
+	       key_p->data[1] == 168 && key_p->data[2] == 128);
 
 	memset(next_key_p, 0, key_size);
 	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);

@@ -592,7 +592,7 @@ static void test_lpm_get_next_key(void)
 
 	/* Add one more element (total three) */
 	key_p->prefixlen = 24;
-	inet_pton(AF_INET, "192.168.128.0", key_p->data);
+	inet_pton(AF_INET, "192.168.0.0", key_p->data);
 	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
 
 	memset(key_p, 0, key_size);

@@ -643,6 +643,41 @@ static void test_lpm_get_next_key(void)
 	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
 	       errno == ENOENT);
 
+	/* Add one more element (total five) */
+	key_p->prefixlen = 28;
+	inet_pton(AF_INET, "192.168.1.128", key_p->data);
+	assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+	memset(key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+	assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+	       key_p->data[1] == 168 && key_p->data[2] == 0);
+
+	memset(next_key_p, 0, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168 && next_key_p->data[2] == 1 &&
+	       next_key_p->data[3] == 128);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+	assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+	       next_key_p->data[1] == 168);
+
+	memcpy(key_p, next_key_p, key_size);
+	assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+	       errno == ENOENT);
+
 	/* no exact matching key should return the first one in post order */
 	key_p->prefixlen = 22;
 	inet_pton(AF_INET, "192.168.1.0", key_p->data);

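The new assertions pin down the full iteration order over all five elements: bpf_map_get_next_key() on an LPM trie walks keys in post-order, so deeper and longer prefixes are returned before their parents, and the /16 root comes last. A summary (ours) of the order the asserts encode:

#include <stdio.h>

int main(void)
{
        static const char *order[] = {
                "192.168.0.0/24",
                "192.168.1.128/28",
                "192.168.1.0/24",
                "192.168.128.0/24",
                "192.168.0.0/16",       /* parent of everything: last */
        };

        for (unsigned int i = 0; i < sizeof(order) / sizeof(order[0]); i++)
                printf("%u: %s\n", i, order[i]);
        return 0;
}
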
@@ -29,8 +29,11 @@
 	"DIV64 overflow, check 1",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_1, -1),
-	BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
-	BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+	BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+	BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1),
+	BPF_MOV32_IMM(BPF_REG_0, 1),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,

@@ -40,8 +43,11 @@
 {
 	"DIV64 overflow, check 2",
 	.insns = {
-	BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
-	BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+	BPF_LD_IMM64(BPF_REG_1, LLONG_MIN),
+	BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1),
+	BPF_MOV32_IMM(BPF_REG_0, 1),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,

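The reworked tests compute LLONG_MIN / -1 in a scratch register and return 0 only when the quotient is 0, the value BPF's unsigned 64-bit division mandates: interpreted as u64, the divisor 0xffffffffffffffff exceeds the dividend 0x8000000000000000, so there is no signed-overflow case at all. A one-line C check (ours):

#include <stdio.h>
#include <limits.h>
#include <stdint.h>

int main(void)
{
        uint64_t dividend = (uint64_t)LLONG_MIN; /* 0x8000000000000000 */
        uint64_t divisor = (uint64_t)-1;         /* 0xffffffffffffffff */

        printf("%llu\n", (unsigned long long)(dividend / divisor)); /* 0 */
        return 0;
}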