ipv6: add priority parameter to ip6_xmit()
Currently, ip6_xmit() sets skb->priority based on sk->sk_priority. This is not desirable for TCP, since TCP shares the same ctl socket for a given netns. We want to be able to send RST or ACK packets with a non-zero skb->priority. This patch has no functional change.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4f6570d720 (parent 2b6fd3ea43)
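For context (not part of this patch): sk->sk_priority is the value applications set with the SO_PRIORITY socket option, and it is what ip6_xmit() used to copy into every skb unconditionally. Because the per-netns TCP ctl socket is shared by all flows, its sk_priority cannot reflect any particular connection, which is why the priority now travels as an explicit argument. A minimal user-space sketch of the SO_PRIORITY side (assumes Linux; values above 6 need CAP_NET_ADMIN):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int prio = 6;                   /* 0..6 allowed without CAP_NET_ADMIN */
        socklen_t len = sizeof(prio);
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Stored in sk->sk_priority; before this patch ip6_xmit() copied
         * that field into skb->priority for every packet it sent. */
        if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0) {
                perror("setsockopt(SO_PRIORITY)");
                return 1;
        }
        if (getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &len) == 0)
                printf("SO_PRIORITY = %d\n", prio);
        close(fd);
        return 0;
}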
include/net/ipv6.h
@@ -981,7 +981,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
  * upper-layer output functions
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-             __u32 mark, struct ipv6_txoptions *opt, int tclass);
+             __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 
net/dccp/ipv6.c
@@ -230,7 +230,8 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
+               err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass,
+                              sk->sk_priority);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
@@ -284,7 +285,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(skb, dst);
-               ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
+               ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
                DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
                return;
 
net/ipv6/inet6_connection_sock.c
@@ -133,7 +133,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
        fl6.daddr = sk->sk_v6_daddr;
 
        res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
-                      np->tclass);
+                      np->tclass, sk->sk_priority);
        rcu_read_unlock();
        return res;
 }
 
net/ipv6/ip6_output.c
@@ -193,7 +193,7 @@ bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
  * which are using proper atomic operations or spinlocks.
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-             __u32 mark, struct ipv6_txoptions *opt, int tclass)
+             __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
 {
        struct net *net = sock_net(sk);
        const struct ipv6_pinfo *np = inet6_sk(sk);
 
@@ -258,7 +258,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        hdr->daddr = *first_hop;
 
        skb->protocol = htons(ETH_P_IPV6);
-       skb->priority = sk->sk_priority;
+       skb->priority = priority;
        skb->mark = mark;
 
        mtu = dst_mtu(dst);
 
net/ipv6/tcp_ipv6.c
@@ -512,7 +512,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
+               err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass,
+                              sk->sk_priority);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
@@ -907,7 +908,8 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
-               ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
+               ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,
+                        0);
                TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                if (rst)
                        TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 
net/sctp/ipv6.c
@@ -215,7 +215,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 
        rcu_read_lock();
        res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
-                      tclass);
+                      tclass, sk->sk_priority);
        rcu_read_unlock();
        return res;
 }
 
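The control-socket call sites above (dccp_v6_ctl_send_reset() and tcp_v6_send_response()) still pass 0, so behaviour is unchanged; the point of the new parameter is that a later change can forward the originating socket's priority instead of the shared ctl socket's value. A hypothetical kernel-side sketch, mirroring tcp_v6_send_response() (the function name and the NULL-sk check are assumptions, not part of this patch):

/* Hypothetical follow-up, not in this patch: tag RST/ACK packets sent
 * via the shared per-netns ctl socket with the originating socket's
 * priority rather than the ctl socket's (zero) sk_priority.
 */
static void example_send_reset(const struct sock *sk, struct sock *ctl_sk,
                               struct sk_buff *buff, struct flowi6 *fl6,
                               int tclass)
{
        /* sk can be NULL when the reset matches no local socket. */
        u32 priority = sk ? sk->sk_priority : 0;

        ip6_xmit(ctl_sk, buff, fl6, fl6->flowi6_mark, NULL, tclass, priority);
}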