ipv4: irq safe sk_dst_[re]set() and ipv4_sk_update_pmtu() fix
We have two different ways to handle changes to sk->sk_dst:

First way (used by TCP) assumes the socket lock is owned by the caller
and uses no extra lock: __sk_dst_set() & __sk_dst_reset()

Second way (used by UDP) uses sk_dst_lock, because the socket lock is
not always taken. Note that sk_dst_lock is not softirq safe.

These ways are not interchangeable for a given socket type.

ipv4_sk_update_pmtu(), added in linux-3.8, added a race, as it used
the socket lock as synchronization, but users might be UDP sockets.

Instead of converting sk_dst_lock to a softirq-safe version, use xchg()
as we did for sk_rx_dst in commit e47eb5dfb2
("udp: ipv4: do not use sk_dst_lock from softirq context")

In a follow-up patch, we probably can remove sk_dst_lock, as it is
only used in IPv6.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Fixes: 9cb3a50c5f ("ipv4: Invalidate the socket cached route on pmtu events if possible")
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7f50236153
parent dba63115ce
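For readers unfamiliar with the pattern, here is a minimal user-space sketch of the lock-free swap the patch adopts. It uses C11 atomic_exchange() in place of the kernel's xchg(); struct obj, cache_slot, obj_put() and cache_set() are illustrative stand-ins for dst_entry, sk->sk_dst_cache, dst_release() and sk_dst_set(), not kernel APIs.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static _Atomic(struct obj *) cache_slot;	/* stands in for sk->sk_dst_cache */

static void obj_put(struct obj *o)		/* stands in for dst_release() */
{
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

static void cache_set(struct obj *o)		/* stands in for the new sk_dst_set() */
{
	/* Publish the new entry and take ownership of the old one in a
	 * single atomic step; no spinlock is held, so the swap stays
	 * safe even when the caller races with softirq processing. */
	struct obj *old = atomic_exchange(&cache_slot, o);

	obj_put(old);
}

int main(void)
{
	struct obj *a = calloc(1, sizeof(*a));

	atomic_init(&a->refcnt, 1);
	cache_set(a);		/* install */
	cache_set(NULL);	/* reset, like the new sk_dst_reset(): a is freed exactly once */
	return 0;
}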
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-	spin_lock(&sk->sk_dst_lock);
-	__sk_dst_set(sk, dst);
-	spin_unlock(&sk->sk_dst_lock);
+	struct dst_entry *old_dst;
+
+	sk_tx_queue_clear(sk);
+	old_dst = xchg(&sk->sk_dst_cache, dst);
+	dst_release(old_dst);
 }
 
 static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-	spin_lock(&sk->sk_dst_lock);
-	__sk_dst_reset(sk);
-	spin_unlock(&sk->sk_dst_lock);
+	sk_dst_set(sk, NULL);
 }
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	const struct iphdr *iph = (const struct iphdr *) skb->data;
 	struct flowi4 fl4;
 	struct rtable *rt;
-	struct dst_entry *dst;
+	struct dst_entry *odst = NULL;
 	bool new = false;
 
 	bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	if (!ip_sk_accept_pmtu(sk))
 		goto out;
 
-	rt = (struct rtable *) __sk_dst_get(sk);
+	odst = sk_dst_get(sk);
 
-	if (sock_owned_by_user(sk) || !rt) {
+	if (sock_owned_by_user(sk) || !odst) {
 		__ipv4_sk_update_pmtu(skb, sk, mtu);
 		goto out;
 	}
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-	if (!__sk_dst_check(sk, 0)) {
+	rt = (struct rtable *)odst;
+	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
 		if (IS_ERR(rt))
 			goto out;
@@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
 	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-	dst = dst_check(&rt->dst, 0);
-	if (!dst) {
+	if (!dst_check(&rt->dst, 0)) {
 		if (new)
 			dst_release(&rt->dst);
 
@@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	}
 
 	if (new)
-		__sk_dst_set(sk, &rt->dst);
+		sk_dst_set(sk, &rt->dst);
 
 out:
 	bh_unlock_sock(sk);
+	dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
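To see why the xchg()-based setter is sufficient for the patched ipv4_sk_update_pmtu() path, here is a hedged user-space stress sketch (not kernel code): two threads publish objects into one cache slot concurrently, with no shared lock, and every object is still released exactly once. slot, slot_set(), put() and alive are illustrative names; unlike the kernel, there are no concurrent RCU readers here, so objects are single-owner and unrefcounted.

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj { int dummy; };

static _Atomic(struct obj *) slot;	/* stands in for sk->sk_dst_cache */
static atomic_int alive;		/* objects allocated but not yet freed */

static void put(struct obj *o)		/* stands in for dst_release() */
{
	if (o) {
		atomic_fetch_sub(&alive, 1);
		free(o);
	}
}

static void slot_set(struct obj *o)	/* stands in for sk_dst_set() */
{
	/* atomic_exchange hands each displaced pointer to exactly one
	 * caller, so no entry is leaked or double-freed even under
	 * contention. */
	put(atomic_exchange(&slot, o));
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		struct obj *o = malloc(sizeof(*o));

		atomic_fetch_add(&alive, 1);
		slot_set(o);	/* publish; the previous entry is freed once */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	slot_set(NULL);			/* final reset, like sk_dst_reset() */
	assert(atomic_load(&alive) == 0);
	return 0;
}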