[IPV4/IPV6]: Replace spin_lock_irq with spin_lock_bh
In light of my recent patch to net/ipv4/udp.c that replaced the spin_lock_irq calls on the receive queue lock with spin_lock_bh, here is a similar patch for all other occurrences of spin_lock_irq on receive/error queue locks in IPv4 and IPv6. In these stacks, we know that the locks can only be taken from user or softirq context, so it is safe to disable bottom halves only.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e0f9f8586a
parent 9ed19f339e
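For context, here is a minimal sketch (not part of the patch; the helper name is hypothetical) of the locking pattern the diff below converts to. The point is that these queue locks are only ever contended between process context and the softirq receive path, so disabling bottom halves is sufficient:

/*
 * Illustrative sketch only, assuming a queue whose lock is shared
 * exclusively between process context and softirq context.
 */
#include <linux/spinlock.h>
#include <linux/skbuff.h>

static int queue_head_len(struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	int len = 0;

	/*
	 * spin_lock_bh() disables bottom halves on this CPU, which is
	 * enough to keep the softirq receive path from re-entering and
	 * deadlocking on the lock we hold.  spin_lock_irq() would also
	 * disable hardware interrupts, an unnecessary cost here since
	 * no hardirq handler ever takes these queue locks.
	 */
	spin_lock_bh(&queue->lock);
	skb = skb_peek(queue);
	if (skb != NULL)
		len = skb->len;
	spin_unlock_bh(&queue->lock);

	return len;
}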
@@ -360,14 +360,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	err = copied;
 
 	/* Reset and regenerate socket error */
-	spin_lock_irq(&sk->sk_error_queue.lock);
+	spin_lock_bh(&sk->sk_error_queue.lock);
 	sk->sk_err = 0;
 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 		sk->sk_error_report(sk);
 	} else
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 
 out_free_skb:
 	kfree_skb(skb);
@@ -691,11 +691,11 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		struct sk_buff *skb;
 		int amount = 0;
 
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb != NULL)
 			amount = skb->len;
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
 
@@ -353,14 +353,14 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	err = copied;
 
 	/* Reset and regenerate socket error */
-	spin_lock_irq(&sk->sk_error_queue.lock);
+	spin_lock_bh(&sk->sk_error_queue.lock);
 	sk->sk_err = 0;
 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 		sk->sk_error_report(sk);
 	} else {
-		spin_unlock_irq(&sk->sk_error_queue.lock);
+		spin_unlock_bh(&sk->sk_error_queue.lock);
 	}
 
 out_free_skb:
@@ -434,12 +434,12 @@ csum_copy_err:
 	/* Clear queue. */
 	if (flags&MSG_PEEK) {
 		int clear = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (skb == skb_peek(&sk->sk_receive_queue)) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			clear = 1;
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		if (clear)
 			kfree_skb(skb);
 	}
@@ -971,11 +971,11 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		struct sk_buff *skb;
 		int amount = 0;
 
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb != NULL)
 			amount = skb->tail - skb->h.raw;
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
 
@@ -300,12 +300,12 @@ csum_copy_err:
 	/* Clear queue. */
 	if (flags&MSG_PEEK) {
 		int clear = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (skb == skb_peek(&sk->sk_receive_queue)) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			clear = 1;
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		if (clear)
 			kfree_skb(skb);
 	}