Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
The following pull-request contains BPF updates for your *net* tree.

We've added 11 non-merge commits during the last 14 day(s) which contain
a total of 13 files changed, 61 insertions(+), 24 deletions(-).

The main changes are:

1) Fix BPF verifier's precision tracking around BPF ring buffer, from Kumar Kartikeya Dwivedi.

2) Fix regression in tunnel key infra when passing FLOWI_FLAG_ANYSRC, from Eyal Birger.

3) Fix insufficient permissions for bpf_sys_bpf() helper, from YiFei Zhu.

4) Fix splat from hitting BUG when purging effective cgroup programs, from Pu Lehui.

5) Fix range tracking for array poke descriptors, from Daniel Borkmann.

6) Fix corrupted packets for XDP_SHARED_UMEM in aligned mode, from Magnus Karlsson.

7) Fix NULL pointer splat in BPF sockmap sk_msg_recvmsg(), from Liu Jian.

8) Add READ_ONCE() to bpf_jit_limit when reading from sysctl, from Kuniyuki Iwashima.

9) Add BPF selftest lru_bug check to s390x deny list, from Daniel Müller.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
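A note on fix 8): bpf_jit_limit can be rewritten via sysctl while bpf_jit_charge_modmem() reads it locklessly on another CPU, so the unannotated read was a data race. A minimal sketch of the pattern, with hypothetical names rather than the kernel's exact code:

	/* Writer and readers share no lock, so both sides need ONCE
	 * annotations to prevent the compiler from tearing, reloading,
	 * or caching the access. */
	static long jit_limit;			/* stand-in for bpf_jit_limit */

	void set_limit(long new_limit)		/* sysctl write path */
	{
		WRITE_ONCE(jit_limit, new_limit);
	}

	int charge(atomic_long_t *cur, long size)	/* concurrent reader */
	{
		if (atomic_long_add_return(size, cur) > READ_ONCE(jit_limit)) {
			atomic_long_sub(size, cur);
			return -EPERM;
		}
		return 0;
	}

READ_ONCE()/WRITE_ONCE() only make each access single and untorn; they add no memory ordering, which this limit check does not need.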
commit 2e085ec0e2
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
 
 	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
 	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
-			    0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
+			    0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+			    0);
 
 	rt = ip_route_output_key(tun->net, &fl4);
 	if (IS_ERR(rt))
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -246,7 +246,8 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
 				       __be32 daddr, __be32 saddr,
 				       __be32 key, __u8 tos,
 				       struct net *net, int oif,
-				       __u32 mark, __u32 tun_inner_hash)
+				       __u32 mark, __u32 tun_inner_hash,
+				       __u8 flow_flags)
 {
 	memset(fl4, 0, sizeof(*fl4));
 
@@ -263,6 +264,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
 	fl4->fl4_gre_key = key;
 	fl4->flowi4_mark = mark;
 	fl4->flowi4_multipath_hash = tun_inner_hash;
+	fl4->flowi4_flags = flow_flags;
 }
 
 int ip_tunnel_init(struct net_device *dev);
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -79,7 +79,7 @@ struct bpf_insn {
 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
 struct bpf_lpm_trie_key {
 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
-	__u8	data[];	/* Arbitrary size */
+	__u8	data[0];	/* Arbitrary size */
 };
 
 struct bpf_cgroup_storage_key {
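For context on why the zero-length array matters to userspace (this rationale is inferred, not stated in the diff): consumers typically embed struct bpf_lpm_trie_key at the head of a wrapper struct that gives the key bytes a concrete size, and a C99 flexible array member (data[]) makes such embedding invalid in strict C and C++, while the GNU zero-length array (data[0]) keeps those long-standing wrappers compiling. A rough, hypothetical example using the standard libbpf call:

	#include <arpa/inet.h>
	#include <linux/bpf.h>
	#include <bpf/bpf.h>

	struct lpm_key_v4 {
		struct bpf_lpm_trie_key trie_key;	/* prefixlen + data[0] */
		__u32 addr;				/* IPv4 address, network order */
	};

	static int add_prefix(int map_fd, const char *ip, __u32 plen, __u64 val)
	{
		struct lpm_key_v4 key = { .trie_key.prefixlen = plen };

		if (inet_pton(AF_INET, ip, &key.addr) != 1)
			return -1;
		return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
	}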
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -921,8 +921,10 @@ static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
 				pos++;
 			}
 		}
+		/* no link or prog match, skip the cgroup of this layer */
+		continue;
 found:
 		BUG_ON(!cg);
 		progs = rcu_dereference_protected(
 				desc->bpf.effective[atype],
 				lockdep_is_held(&cgroup_mutex));
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -971,7 +971,7 @@ pure_initcall(bpf_jit_charge_init);
 
 int bpf_jit_charge_modmem(u32 size)
 {
-	if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
+	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
 		if (!bpf_capable()) {
 			atomic_long_sub(size, &bpf_jit_current);
 			return -EPERM;
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5197,7 +5197,7 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
 	switch (func_id) {
 	case BPF_FUNC_sys_bpf:
-		return &bpf_sys_bpf_proto;
+		return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
 	case BPF_FUNC_btf_find_by_name_kind:
 		return &bpf_btf_find_by_name_kind_proto;
 	case BPF_FUNC_sys_close:
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6066,6 +6066,9 @@ skip_type_check:
 			return -EACCES;
 		}
 		meta->mem_size = reg->var_off.value;
+		err = mark_chain_precision(env, regno);
+		if (err)
+			return err;
 		break;
 	case ARG_PTR_TO_INT:
 	case ARG_PTR_TO_LONG:
@@ -7030,8 +7033,7 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
 	struct bpf_reg_state *regs = cur_regs(env), *reg;
 	struct bpf_map *map = meta->map_ptr;
-	struct tnum range;
-	u64 val;
+	u64 val, max;
 	int err;
 
 	if (func_id != BPF_FUNC_tail_call)
@@ -7041,10 +7043,11 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
 		return -EINVAL;
 	}
 
-	range = tnum_range(0, map->max_entries - 1);
 	reg = &regs[BPF_REG_3];
+	val = reg->var_off.value;
+	max = map->max_entries;
 
-	if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
+	if (!(register_is_const(reg) && val < max)) {
 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
 		return 0;
 	}
@@ -7052,8 +7055,6 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
 	err = mark_chain_precision(env, BPF_REG_3);
 	if (err)
 		return err;
-
-	val = reg->var_off.value;
 	if (bpf_map_key_unseen(aux))
 		bpf_map_key_store(aux, val);
 	else if (!bpf_map_key_poisoned(aux) &&
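To illustrate what the val < max rewrite fixes: tnum_range() over-approximates a range, so the old tnum_in() check could accept constant keys that are actually out of bounds. For example, tnum_range(0, 2) is the tnum {value = 0, mask = 3}, which also "contains" the constant 3. A hypothetical BPF C program that hits exactly this case (map and section names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
		__uint(max_entries, 3);		/* valid keys: 0, 1, 2 */
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));
	} jmp_table SEC(".maps");

	SEC("xdp")
	int dispatch(struct xdp_md *ctx)
	{
		/* Constant key 3 is out of range; the old tnum-based check
		 * still treated it as in-range and recorded it in the poke
		 * descriptor, while the direct val < max comparison now
		 * poisons the key instead. */
		bpf_tail_call(ctx, &jmp_table, 3);
		return XDP_PASS;
	}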
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -461,7 +461,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 
 			if (copied == len)
 				break;
-		} while (!sg_is_last(sge));
+		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
 
 		if (unlikely(peek)) {
 			msg_rx = sk_psock_next_msg(psock, msg_rx);
@@ -471,7 +471,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 		}
 
 		msg_rx->sg.start = i;
-		if (!sge->length && sg_is_last(sge)) {
+		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
 			msg_rx = sk_psock_dequeue_msg(psock);
 			kfree_sk_msg(msg_rx);
 		}
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -609,7 +609,7 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
 			    tunnel_id_to_key32(key->tun_id),
 			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
-			    skb->mark, skb_get_hash(skb));
+			    skb->mark, skb_get_hash(skb), key->flow_flags);
 	rt = ip_route_output_key(dev_net(dev), &fl4);
 	if (IS_ERR(rt))
 		return PTR_ERR(rt);
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -295,7 +295,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
 				    iph->saddr, tunnel->parms.o_key,
 				    RT_TOS(iph->tos), dev_net(dev),
-				    tunnel->parms.link, tunnel->fwmark, 0);
+				    tunnel->parms.link, tunnel->fwmark, 0, 0);
 		rt = ip_route_output_key(tunnel->net, &fl4);
 
 		if (!IS_ERR(rt)) {
@@ -570,7 +570,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	}
 	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
 			    tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
-			    dev_net(dev), 0, skb->mark, skb_get_hash(skb));
+			    dev_net(dev), 0, skb->mark, skb_get_hash(skb),
+			    key->flow_flags);
 	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
 		goto tx_error;
 
@@ -729,7 +730,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
 			    tunnel->parms.o_key, RT_TOS(tos),
 			    dev_net(dev), tunnel->parms.link,
-			    tunnel->fwmark, skb_get_hash(skb));
+			    tunnel->fwmark, skb_get_hash(skb), 0);
 
 	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
 		goto tx_error;
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -379,6 +379,16 @@ static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
 
 static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
 {
+	if (!pool->unaligned) {
+		u32 i;
+
+		for (i = 0; i < pool->heads_cnt; i++) {
+			struct xdp_buff_xsk *xskb = &pool->heads[i];
+
+			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
+		}
+	}
+
 	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
 	if (!pool->dma_pages)
 		return -ENOMEM;
@@ -428,12 +438,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
 	if (pool->unaligned)
 		xp_check_dma_contiguity(dma_map);
-	else
-		for (i = 0; i < pool->heads_cnt; i++) {
-			struct xdp_buff_xsk *xskb = &pool->heads[i];
-
-			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
-		}
 
 	err = xp_init_dma_info(pool, dma_map);
 	if (err) {
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -65,3 +65,4 @@ send_signal # intermittently fails to receive signa
 select_reuseport                         # intermittently fails on new s390x setup
 xdp_synproxy                             # JIT does not support calling kernel function (kfunc)
 unpriv_bpf_disabled                      # fentry
+lru_bug                                  # prog 'printk': failed to auto-attach: -524
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -192,3 +192,28 @@
 	.result = VERBOSE_ACCEPT,
 	.retval = -1,
 },
+{
+	"precise: mark_chain_precision for ARG_CONST_ALLOC_SIZE_OR_ZERO",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, ingress_ifindex)),
+	BPF_LD_MAP_FD(BPF_REG_6, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+	BPF_MOV64_IMM(BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
+	BPF_MOV64_IMM(BPF_REG_2, 0x1000),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 42),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_ringbuf = { 1 },
+	.prog_type = BPF_PROG_TYPE_XDP,
+	.flags = BPF_F_TEST_STATE_FREQ,
+	.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
+	.result = REJECT,
+},
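The raw instructions above correspond roughly to the following BPF C shape (a hypothetical equivalent for readability; the map size and names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 4096);
	} rb SEC(".maps");

	SEC("xdp")
	int probe(struct xdp_md *ctx)
	{
		/* size is 1 on one path and 0x1000 on the other */
		__u64 size = ctx->ingress_ifindex ? 1 : 0x1000;
		char *p = bpf_ringbuf_reserve(&rb, size, 0);

		if (!p)
			return XDP_PASS;
		/* Without mark_chain_precision() on the size argument, state
		 * pruning could merge the two paths, and an 8-byte access at
		 * offset 42 would be judged against the 0x1000-byte reservation
		 * even on the 1-byte path; the test expects the verifier to
		 * reject it with mem_size=1. */
		bpf_ringbuf_submit(p, 0);
		return XDP_PASS;
	}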