Merge branch 'net-few-debug-refinements'

Eric Dumazet says:

====================
net: few debug refinements

Adopt DEBUG_NET_WARN_ON_ONCE() or WARN_ON_ONCE()
in some places where it makes sense.

Add checks in napi_consume_skb() and __napi_alloc_skb().

Make sure napi_get_frags() does not use page fragments
for skb->head.
====================

Link: https://lore.kernel.org/r/20220608160438.1342569-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit: 3000024ccc
Committed by: Jakub Kicinski, 2022-06-09 21:54:00 -07:00
6 changed files with 34 additions and 15 deletions
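One piece of context before the diffs: DEBUG_NET_WARN_ON_ONCE() is the CONFIG_DEBUG_NET-gated cousin of WARN_ON_ONCE(). On kernels built without CONFIG_DEBUG_NET the check emits no code at all (the condition is still type-checked), so the conversions below make these assertions free in production builds while still warning, at most once, on debug builds. A sketch of the macro's shape, roughly as it stood at the time of this series:

    #if defined(CONFIG_DEBUG_NET)
    #define DEBUG_NET_WARN_ON_ONCE(cond) ((void)WARN_ON_ONCE(cond))
    #else
    /* no code emitted; BUILD_BUG_ON_INVALID() only type-checks cond */
    #define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
    #endif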

diff --git a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c
+++ b/net/core/dev.c

@@ -3925,7 +3925,7 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
         skb->pkt_type = PACKET_LOOPBACK;
         if (skb->ip_summed == CHECKSUM_NONE)
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
-        WARN_ON(!skb_dst(skb));
+        DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
         skb_dst_force(skb);
         netif_rx(skb);
         return 0;
@@ -6351,6 +6351,23 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
 }
 EXPORT_SYMBOL(dev_set_threaded);
 
+/* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+ * This is to make sure bug fixed in 3226b158e67c
+ * ("net: avoid 32 x truesize under-estimation for tiny skbs")
+ * does not accidentally come back.
+ */
+static void napi_get_frags_check(struct napi_struct *napi)
+{
+        struct sk_buff *skb;
+
+        local_bh_disable();
+        skb = napi_get_frags(napi);
+        WARN_ON_ONCE(skb && skb->head_frag);
+        napi_free_frags(napi);
+        local_bh_enable();
+}
+
 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
                            int (*poll)(struct napi_struct *, int), int weight)
 {
@@ -6378,6 +6395,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
         set_bit(NAPI_STATE_NPSVC, &napi->state);
         list_add_rcu(&napi->dev_list, &dev->napi_list);
         napi_hash_add(napi);
+        napi_get_frags_check(napi);
         /* Create kthread for this napi if dev->threaded is set.
          * Clear dev->threaded if kthread creation failed so that
          * threaded mode will not be enabled in napi_enable().
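A note on napi_get_frags_check() above: the referenced commit 3226b158e67c made napi_alloc_skb() use a kmalloc()-backed head for small requests, because a tiny skb whose head sits in a page fragment can keep an entire fragment-cache chunk (up to 32KB) pinned while advertising only a tiny truesize, hence the "32 x truesize under-estimation". A simplified sketch of the path the self-test exercises (the GRO_MAX_HEAD request size reflects the then-current code as I recall it):

    /* napi_get_frags() caches a pre-allocated skb for GRO to attach
     * page frags to:
     *
     *   napi_get_frags(napi)
     *     -> napi_alloc_skb(napi, GRO_MAX_HEAD)   // a small length
     *
     * Small lengths must take the slab path, leaving skb->head_frag == 0;
     * the WARN_ON_ONCE(skb && skb->head_frag) above fires if the
     * page-fragment path ever creeps back in for tiny heads.
     */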

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c

@@ -560,6 +560,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
         struct sk_buff *skb;
         void *data;
 
+        DEBUG_NET_WARN_ON_ONCE(!in_softirq());
         len += NET_SKB_PAD + NET_IP_ALIGN;
 
         /* If requested length is either too small or too big,
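Why can napi_get_frags_check() satisfy this new assertion from process context? in_softirq() tests softirq_count(), which is nonzero both inside a softirq handler and anywhere bottom halves are disabled, so the helper's local_bh_disable()/local_bh_enable() pair is enough. The normal caller is a driver's NAPI poll routine; a minimal, hypothetical sketch (my_poll() and the length 128 are illustrative, not from the patch):

    /* Hypothetical NAPI poll callback: runs in softirq context, so the new
     * DEBUG_NET_WARN_ON_ONCE(!in_softirq()) in __napi_alloc_skb() stays
     * quiet.  Calling napi_alloc_skb() from process context without
     * local_bh_disable() would trip it on CONFIG_DEBUG_NET kernels.
     */
    static int my_poll(struct napi_struct *napi, int budget)
    {
            struct sk_buff *skb = napi_alloc_skb(napi, 128);

            if (skb)
                    dev_kfree_skb_any(skb); /* a real driver would fill it and pass it up */
            return 0; /* all work done (a real driver would call napi_complete_done()) */
    }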
@@ -728,7 +729,7 @@ void skb_release_head_state(struct sk_buff *skb)
 {
         skb_dst_drop(skb);
         if (skb->destructor) {
-                WARN_ON(in_hardirq());
+                DEBUG_NET_WARN_ON_ONCE(in_hardirq());
                 skb->destructor(skb);
         }
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
@@ -981,7 +982,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
                 return;
         }
 
-        lockdep_assert_in_softirq();
+        DEBUG_NET_WARN_ON_ONCE(!in_softirq());
 
         if (!skb_unref(skb))
                 return;
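The napi_consume_skb() hunk differs slightly from the other conversions: lockdep_assert_in_softirq() is only compiled in on lockdep-enabled kernels (CONFIG_PROVE_LOCKING), whereas the replacement is gated on CONFIG_DEBUG_NET like the rest of the series and warns at most once. Roughly, the trade-off (a sketch; the lockdep macro is actually a bit stricter about hardirq/NMI context):

    /* old: active only under lockdep, roughly
     *   WARN_ON_ONCE(!in_softirq() || in_irq() || in_nmi());
     * new: active under CONFIG_DEBUG_NET, compiled out otherwise
     *   DEBUG_NET_WARN_ON_ONCE(!in_softirq());
     */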

diff --git a/net/core/sock.c b/net/core/sock.c
--- a/net/core/sock.c
+++ b/net/core/sock.c

@@ -2844,7 +2844,7 @@ void __release_sock(struct sock *sk)
                 do {
                         next = skb->next;
                         prefetch(next);
-                        WARN_ON_ONCE(skb_dst_is_noref(skb));
+                        DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb));
                         skb_mark_not_on_list(skb);
                         sk_backlog_rcv(sk, skb);
 

diff --git a/net/core/stream.c b/net/core/stream.c
--- a/net/core/stream.c
+++ b/net/core/stream.c

@@ -196,13 +196,13 @@ void sk_stream_kill_queues(struct sock *sk)
         __skb_queue_purge(&sk->sk_receive_queue);
 
         /* Next, the write queue. */
-        WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
+        WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));
 
         /* Account for returned memory. */
         sk_mem_reclaim_final(sk);
 
-        WARN_ON(sk->sk_wmem_queued);
-        WARN_ON(sk->sk_forward_alloc);
+        WARN_ON_ONCE(sk->sk_wmem_queued);
+        WARN_ON_ONCE(sk->sk_forward_alloc);
 
         /* It is _impossible_ for the backlog to contain anything
          * when we get here. All user references to this socket

diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c

@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
                 return;
         }
 
-        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-        WARN_ON(refcount_read(&sk->sk_wmem_alloc));
-        WARN_ON(sk->sk_wmem_queued);
-        WARN_ON(sk_forward_alloc_get(sk));
+        WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
+        WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
+        WARN_ON_ONCE(sk->sk_wmem_queued);
+        WARN_ON_ONCE(sk_forward_alloc_get(sk));
 
         kfree(rcu_dereference_protected(inet->inet_opt, 1));
         dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));

diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c

@@ -302,7 +302,7 @@ static void __unix_remove_socket(struct sock *sk)
 
 static void __unix_insert_socket(struct sock *sk)
 {
-        WARN_ON(!sk_unhashed(sk));
+        DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
         sk_add_node(sk, &unix_socket_table[sk->sk_hash]);
 }
 
@@ -554,9 +554,9 @@ static void unix_sock_destructor(struct sock *sk)
                 u->oob_skb = NULL;
         }
 #endif
-        WARN_ON(refcount_read(&sk->sk_wmem_alloc));
-        WARN_ON(!sk_unhashed(sk));
-        WARN_ON(sk->sk_socket);
+        DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
+        DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
+        DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
         if (!sock_flag(sk, SOCK_DEAD)) {
                 pr_info("Attempt to release alive unix socket: %p\n", sk);
                 return;