commit 1a51a47491
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-10-27

The following pull-request contains BPF updates for your *net* tree.

We've added 7 non-merge commits during the last 11 day(s) which contain
a total of 7 files changed, 66 insertions(+), 16 deletions(-).

The main changes are:

1) Fix two use-after-free bugs in relation to RCU in jited symbol
   exposure to kallsyms, from Daniel Borkmann.

2) Fix NULL pointer dereference in AF_XDP rx-only sockets, from
   Magnus Karlsson.

3) Fix hang in netdev unregister for hash based devmap as well as
   another overflow bug on 32 bit archs in memlock cost calculation,
   from Toke Høiland-Jørgensen.

4) Fix wrong memory access in LWT BPF programs on reroute due to
   invalid dst. Also fix BPF selftests to use more compatible nc
   options, from Jiri Benc.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1099,7 +1099,6 @@ static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 
 #endif /* CONFIG_BPF_JIT */
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
 
 #define BPF_ANC BIT(15)
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -502,7 +502,7 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 }
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 {
 	int i;
 
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -128,7 +128,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 
 		if (!dtab->n_buckets) /* Overflow check */
 			return -EINVAL;
-		cost += sizeof(struct hlist_head) * dtab->n_buckets;
+		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
 	}
 
 	/* if map size is larger than memlock limit, reject it */
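On 32-bit architectures the removed line multiplies two 32-bit values, so the product can wrap before it is accumulated into the 64-bit cost; casting one operand to u64 forces a full 64-bit multiply. A minimal userspace sketch of the difference (illustrative values, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bucket_size = 8;         /* stand-in for sizeof(struct hlist_head) */
	uint32_t n_buckets = 0x40000000;  /* large bucket count */

	/* 32-bit multiply: 8 * 2^30 = 2^33 wraps to 0 before it is widened */
	uint64_t wrong = bucket_size * n_buckets;
	/* widen one operand first: the multiply happens in 64 bits */
	uint64_t right = (uint64_t)bucket_size * n_buckets;

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}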
@@ -719,6 +719,32 @@ const struct bpf_map_ops dev_map_hash_ops = {
 	.map_check_btf = map_check_no_btf,
 };
 
+static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
+				       struct net_device *netdev)
+{
+	unsigned long flags;
+	u32 i;
+
+	spin_lock_irqsave(&dtab->index_lock, flags);
+	for (i = 0; i < dtab->n_buckets; i++) {
+		struct bpf_dtab_netdev *dev;
+		struct hlist_head *head;
+		struct hlist_node *next;
+
+		head = dev_map_index_hash(dtab, i);
+
+		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+			if (netdev != dev->dev)
+				continue;
+
+			dtab->items--;
+			hlist_del_rcu(&dev->index_hlist);
+			call_rcu(&dev->rcu, __dev_map_entry_free);
+		}
+	}
+	spin_unlock_irqrestore(&dtab->index_lock, flags);
+}
+
 static int dev_map_notification(struct notifier_block *notifier,
 				ulong event, void *ptr)
 {
@@ -735,6 +761,11 @@ static int dev_map_notification(struct notifier_block *notifier,
 		 */
 		rcu_read_lock();
 		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
+			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+				dev_map_hash_remove_netdev(dtab, netdev);
+				continue;
+			}
+
 			for (i = 0; i < dtab->map.max_entries; i++) {
 				struct bpf_dtab_netdev *dev, *odev;
 
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1326,24 +1326,32 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
+	kvfree(aux->func_info);
 	free_used_maps(aux);
 	bpf_prog_uncharge_memlock(aux->prog);
 	security_bpf_prog_free(aux);
 	bpf_prog_free(aux->prog);
 }
 
+static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
+{
+	bpf_prog_kallsyms_del_all(prog);
+	btf_put(prog->aux->btf);
+	bpf_prog_free_linfo(prog);
+
+	if (deferred)
+		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+	else
+		__bpf_prog_put_rcu(&prog->aux->rcu);
+}
+
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
-		bpf_prog_kallsyms_del_all(prog);
-		btf_put(prog->aux->btf);
-		kvfree(prog->aux->func_info);
-		bpf_prog_free_linfo(prog);
-
-		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+		__bpf_prog_put_noref(prog, true);
 	}
 }
 
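The new helper makes the teardown ordering explicit: first unpublish the program (kallsyms, BTF, line info), then free its memory, deferring the free by one RCU grace period whenever readers may still hold RCU-protected references. A minimal sketch of that generic pattern, using a hypothetical obj type that is not part of this patch:

#include <linux/rculist.h>
#include <linux/slab.h>

struct obj {
	struct list_head list;	/* published on an RCU-protected list */
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu);

	kfree(o);	/* safe: all pre-existing RCU readers have finished */
}

static void obj_put(struct obj *o, bool deferred)
{
	list_del_rcu(&o->list);	/* unpublish before any memory is freed */

	if (deferred)
		call_rcu(&o->rcu, obj_free_rcu);	/* wait one grace period */
	else
		obj_free_rcu(&o->rcu);	/* caller knows no readers remain */
}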
@@ -1741,11 +1749,12 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	return err;
 
 free_used_maps:
-	bpf_prog_free_linfo(prog);
-	kvfree(prog->aux->func_info);
-	btf_put(prog->aux->btf);
-	bpf_prog_kallsyms_del_subprogs(prog);
-	free_used_maps(prog->aux);
+	/* In case we have subprogs, we need to wait for a grace
+	 * period before we can tear down JIT memory since symbols
+	 * are already exposed under kallsyms.
+	 */
+	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
+	return err;
 free_prog:
 	bpf_prog_uncharge_memlock(prog);
 free_prog_sec:
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -88,11 +88,16 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb)
 	int err = -EINVAL;
 
 	if (skb->protocol == htons(ETH_P_IP)) {
+		struct net_device *dev = skb_dst(skb)->dev;
 		struct iphdr *iph = ip_hdr(skb);
 
+		dev_hold(dev);
+		skb_dst_drop(skb);
 		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-					   iph->tos, skb_dst(skb)->dev);
+					   iph->tos, dev);
+		dev_put(dev);
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		skb_dst_drop(skb);
 		err = ipv6_stub->ipv6_route_input(skb);
 	} else {
 		err = -EAFNOSUPPORT;
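Here the route lookup installs a new dst on the skb, so the stale dst was both the invalid pointer the commit message refers to and potentially the only holder of a reference on the input device. The fix therefore pins the device before dropping the old dst and releases it after the lookup. Condensed into a standalone sketch (not the verbatim patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>

static void reroute_hold_pattern(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	dev_hold(dev);		/* take our own device reference first */
	skb_dst_drop(skb);	/* may release the last other reference to dev */
	/* ... perform the route lookup using dev, which stays valid ... */
	dev_put(dev);		/* balance dev_hold() */
}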
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -27,6 +27,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
 	unsigned long flags;
 
+	if (!xs->tx)
+		return;
+
 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
 	list_add_rcu(&xs->list, &umem->xsk_list);
 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
@@ -36,6 +39,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
 	unsigned long flags;
 
+	if (!xs->tx)
+		return;
+
 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
 	list_del_rcu(&xs->list);
 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
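umem->xsk_list is walked by the TX path, so only sockets that actually have a TX ring belong on it; an Rx-only socket leaves xs->tx NULL, which is the pointer that was being dereferenced. An Rx-only AF_XDP socket is simply one that never configures a TX ring, roughly as in this userspace sketch (error handling and umem setup elided, not from this patch):

#include <linux/if_xdp.h>
#include <sys/socket.h>

/* Create an AF_XDP socket with an RX ring only. Since XDP_TX_RING is
 * never configured, the kernel's xdp_sock keeps xs->tx == NULL, which
 * is the case the new guards above bail out on.
 */
int make_rx_only_xsk(void)
{
	int fd = socket(AF_XDP, SOCK_RAW, 0);
	int ring_entries = 2048;

	setsockopt(fd, SOL_XDP, XDP_RX_RING,
		   &ring_entries, sizeof(ring_entries));
	return fd;
}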
--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -59,7 +59,7 @@ ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
 
 # start the listener
 ip netns exec ${NS_DST} bash -c \
-	"nc -4 -l -s ${IP_DST} -p 9000 >/dev/null &"
+	"nc -4 -l -p 9000 >/dev/null &"
 declare -i NC_PID=$!
 sleep 1
 