netfilter: nf_conntrack_ipv6: fix inconsistent lock state in nf_ct_frag6_gather()
[ 63.531438] =================================
[ 63.531520] [ INFO: inconsistent lock state ]
[ 63.531520] 2.6.26-rc4 #7
[ 63.531520] ---------------------------------
[ 63.531520] inconsistent {softirq-on-W} -> {in-softirq-W} usage.
[ 63.531520] tcpsic6/3864 [HC0[0]:SC1[1]:HE1:SE0] takes:
[ 63.531520]  (&q->lock#2){-+..}, at: [<c07175b0>] ipv6_frag_rcv+0xd0/0xbd0
[ 63.531520] {softirq-on-W} state was registered at:
[ 63.531520]   [<c0143bba>] __lock_acquire+0x3aa/0x1080
[ 63.531520]   [<c0144906>] lock_acquire+0x76/0xa0
[ 63.531520]   [<c07a8f0b>] _spin_lock+0x2b/0x40
[ 63.531520]   [<c0727636>] nf_ct_frag6_gather+0x3f6/0x910
...

According to this and another similar lockdep report inet_fragment locks
are taken from nf_ct_frag6_gather() with softirqs enabled, but these
locks are mainly used in softirq context, so disabling BHs is necessary.

Reported-and-tested-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
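For illustration only (not part of the patch): a minimal sketch of the locking rule the lockdep splat is about, using hypothetical names (shared_lock, counter, my_tasklet_fn, process_ctx_update). A spinlock that is also taken from softirq context must be acquired with the _bh variants (or with BHs otherwise disabled) from process context; otherwise a softirq raised on the same CPU while the lock is held would spin on a lock its own CPU already owns, which is the {softirq-on-W} -> {in-softirq-W} inconsistency reported above.

/*
 * Illustrative sketch only; names are hypothetical, not from the patch.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(shared_lock);
static unsigned long counter;

/* Softirq context (e.g. a tasklet): plain spin_lock() is fine here,
 * because another softirq cannot preempt this one on the same CPU. */
static void my_tasklet_fn(unsigned long data)
{
	spin_lock(&shared_lock);
	counter++;
	spin_unlock(&shared_lock);
}

/* Process context: with plain spin_lock(), a softirq firing on this CPU
 * inside the critical section could try to take shared_lock again and
 * deadlock.  spin_lock_bh() disables bottom halves locally for the
 * duration of the critical section, closing that window. */
static void process_ctx_update(void)
{
	spin_lock_bh(&shared_lock);
	counter++;
	spin_unlock_bh(&shared_lock);
}

This is the same pattern the patch applies below: fq->q.lock is taken with spin_lock_bh()/spin_unlock_bh() in nf_ct_frag6_gather(), and the inet_frag_find() lookup in fq_find() is wrapped in local_bh_disable()/local_bh_enable().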
parent d2ee3f2c4b
commit b9c6989646
@@ -209,7 +209,9 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 	arg.dst = dst;
 	hash = ip6qhashfn(id, src, dst);
 
+	local_bh_disable();
 	q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+	local_bh_enable();
 	if (q == NULL)
 		goto oom;
 
@@ -638,10 +640,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 		goto ret_orig;
 	}
 
-	spin_lock(&fq->q.lock);
+	spin_lock_bh(&fq->q.lock);
 
 	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
-		spin_unlock(&fq->q.lock);
+		spin_unlock_bh(&fq->q.lock);
 		pr_debug("Can't insert skb to queue\n");
 		fq_put(fq);
 		goto ret_orig;
@@ -653,7 +655,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 		if (ret_skb == NULL)
 			pr_debug("Can't reassemble fragmented packets\n");
 	}
-	spin_unlock(&fq->q.lock);
+	spin_unlock_bh(&fq->q.lock);
 
 	fq_put(fq);
 	return ret_skb;