[ATM]: fix possible recursive locking in skb_migrate()
OK, this is a real potential deadlock: skb_migrate() takes the locks of two skbuffs without any kind of lock ordering. I think the following patch fixes it by sorting the lock-taking order by the address of the skb. It's not pretty, but it's the least invasive fix available.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Chas Williams <chas@cmf.nrl.navy.mil>
Signed-off-by: David S. Miller <davem@davemloft.net>
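The pattern generalizes beyond skbuffs: whenever two locks of the same type may be taken in either order, acquiring them in a fixed global order (here, by address) rules out the AB-BA deadlock. Below is a minimal userspace sketch of the same idea, using pthread mutexes in place of kernel spinlocks; the list_t type and list_migrate() name are illustrative only, not part of the patch.

#include <pthread.h>
#include <stdint.h>

/* Stand-in for struct sk_buff_head: a queue protected by its own lock. */
typedef struct {
        pthread_mutex_t lock;
        int qlen;
} list_t;

/* Append "from" onto "to" and empty "from", holding both locks.
 * As in the patch, the lock at the lower address is always taken
 * first, so two threads migrating in opposite directions (a -> b
 * and b -> a) can never each hold one lock while waiting for the
 * other. */
static void list_migrate(list_t *from, list_t *to)
{
        list_t *first  = (uintptr_t) from < (uintptr_t) to ? from : to;
        list_t *second = (first == from) ? to : from;

        pthread_mutex_lock(&first->lock);
        pthread_mutex_lock(&second->lock);

        to->qlen += from->qlen;
        from->qlen = 0;

        pthread_mutex_unlock(&second->lock);
        pthread_mutex_unlock(&first->lock);
}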
commit 1252ecf63f
parent 00181fc946
net/atm/ipcommon.c

@@ -25,22 +25,27 @@
 /*
  * skb_migrate appends the list at "from" to "to", emptying "from" in the
  * process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so beware
- * of potential deadlocks.
+ * "from" and "to". Note that it locks both lists at the same time, so to deal
+ * with the lock ordering, the locks are taken in address order.
  *
  * This function should live in skbuff.c or skbuff.h.
  */
 
 
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
 {
 	unsigned long flags;
 	struct sk_buff *skb_from = (struct sk_buff *) from;
 	struct sk_buff *skb_to = (struct sk_buff *) to;
 	struct sk_buff *prev;
 
-	spin_lock_irqsave(&from->lock,flags);
-	spin_lock(&to->lock);
+	if ((unsigned long) from < (unsigned long) to) {
+		spin_lock_irqsave(&from->lock, flags);
+		spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irqsave(&to->lock, flags);
+		spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+	}
 	prev = from->prev;
 	from->next->prev = to->prev;
 	prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 	from->prev = skb_from;
 	from->next = skb_from;
 	from->qlen = 0;
-	spin_unlock_irqrestore(&from->lock,flags);
+	spin_unlock_irqrestore(&from->lock, flags);
 }
 
 
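A note on the design choice: the spin_lock_nested() calls matter as much as the address ordering. Both queue locks belong to the same lockdep class, so taking the second one with a plain spin_lock() would still trigger the "possible recursive locking" report from the subject line even with the addresses ordered correctly; passing SINGLE_DEPTH_NESTING tells lockdep that holding two locks of this class, nested one level deep, is intentional here.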