Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
  l2tp: Fix possible oops if transmitting or receiving when tunnel goes down
  tcp: Fix for race due to temporary drop of the socket lock in skb_splice_bits.
  tcp: Increment OUTRSTS in tcp_send_active_reset()
  raw: Raw socket leak.
  l2tp: Fix possible WARN_ON from socket code when UDP socket is closed
  USB ID for Philips CPWUA054/00 Wireless USB Adapter 11g
  ssb: Fix context assertion in ssb_pcicore_dev_irqvecs_enable
  libertas: fix command size for CMD_802_11_SUBSCRIBE_EVENT
  ipw2200: expire and use oldest BSS on adhoc create
  airo warning fix
  b43legacy: Fix controller restart crash
  sctp: Fix ECN markings for IPv6
  sctp: Flush the queue only once during fast retransmit.
  sctp: Start T3-RTX timer when fast retransmitting lowest TSN
  sctp: Correctly implement Fast Recovery cwnd manipulations.
  sctp: Move sctp_v4_dst_saddr out of loop
  sctp: retran_path update bug fix
  tcp: fix skb vs fack_count out-of-sync condition
  sunhme: Cleanup use of deprecated calls to save_and_cli and restore_flags.
  xfrm: xfrm_algo: correct usage of RIPEMD-160
  ...
commit 3e387fcdc4
@@ -1,6 +1,6 @@
 In order to use the Ethernet bridging functionality, you'll need the
 userspace tools. These programs and documentation are available
-at http://bridge.sourceforge.net. The download page is
+at http://www.linux-foundation.org/en/Net:Bridge. The download page is
 http://prdownloads.sourceforge.net/bridge.

 If you still have questions, don't hesitate to post to the mailing list

@@ -1611,7 +1611,7 @@ ETHERNET BRIDGE
 P: Stephen Hemminger
 M: shemminger@linux-foundation.org
 L: bridge@lists.linux-foundation.org
-W: http://bridge.sourceforge.net/
+W: http://www.linux-foundation.org/en/Net:Bridge
 S: Maintained

 ETHERTEAM 16I DRIVER

@@ -2023,6 +2023,7 @@ rrd_ok:
 /* Good Receive */
 pci_unmap_page(adapter->pdev, buffer_info->dma,
 buffer_info->length, PCI_DMA_FROMDEVICE);
+buffer_info->dma = 0;
 skb = buffer_info->skb;
 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

@@ -1394,7 +1394,11 @@ net_open(struct net_device *dev)
 #endif
 if (!result) {
 printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
-release_irq:
+release_dma:
+#if ALLOW_DMA
+free_dma(dev->dma);
+#endif
+release_irq:
 #if ALLOW_DMA
 release_dma_buff(lp);
 #endif
@@ -1442,12 +1446,12 @@ net_open(struct net_device *dev)
 if ((result = detect_bnc(dev)) != DETECTED_NONE)
 break;
 printk(KERN_ERR "%s: no media detected\n", dev->name);
-goto release_irq;
+goto release_dma;
 }
 switch(result) {
 case DETECTED_NONE:
 printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name);
-goto release_irq;
+goto release_dma;
 case DETECTED_RJ45H:
 printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
 break;

@@ -75,7 +75,7 @@
 #include "myri10ge_mcp.h"
 #include "myri10ge_mcp_gen_header.h"

-#define MYRI10GE_VERSION_STR "1.3.2-1.287"
+#define MYRI10GE_VERSION_STR "1.3.99-1.347"

 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
 MODULE_AUTHOR("Maintainer: help@myri.com");

@@ -240,12 +240,15 @@ static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
 if (sk == NULL)
 return NULL;

+sock_hold(sk);
 session = (struct pppol2tp_session *)(sk->sk_user_data);
-if (session == NULL)
-return NULL;
+if (session == NULL) {
+sock_put(sk);
+goto out;
+}

 BUG_ON(session->magic != L2TP_SESSION_MAGIC);

+out:
 return session;
 }

@@ -256,12 +259,15 @@ static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
 if (sk == NULL)
 return NULL;

+sock_hold(sk);
 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
-if (tunnel == NULL)
-return NULL;
+if (tunnel == NULL) {
+sock_put(sk);
+goto out;
+}

 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

+out:
 return tunnel;
 }

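The two helpers above carry the core of the tunnel-teardown fix: take a reference on the socket before looking at sk_user_data, and drop it on every path that bails out. A minimal, runnable userspace sketch of that hold-before-dereference discipline (the `obj`/`obj_to_ctx` names are illustrative, not kernel API):

```c
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcnt;
	void *user_data;   /* may be NULL while the object is being torn down */
};

static void obj_hold(struct obj *o) { o->refcnt++; }

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0)
		free(o);
}

/* Returns user_data with a reference held on o, or NULL.
 * Mirrors pppol2tp_sock_to_session(): the reference is taken
 * *before* user_data is examined, and dropped again on failure. */
static void *obj_to_ctx(struct obj *o)
{
	void *ctx;

	if (o == NULL)
		return NULL;

	obj_hold(o);
	ctx = o->user_data;
	if (ctx == NULL) {
		obj_put(o);   /* the failure path must undo the hold */
		return NULL;
	}
	return ctx;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	o->refcnt = 1;
	o->user_data = "session";
	printf("%s\n", (char *)obj_to_ctx(o)); /* refcnt is now 2 */
	obj_put(o);                            /* caller done with ctx */
	obj_put(o);                            /* original reference */
	return 0;
}
```

The corresponding cost is that every caller of these helpers now owns a reference and must pair it with a sock_put(), which is exactly what the goto-label changes in the following hunks do.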
@@ -716,12 +722,14 @@ discard:
 session->stats.rx_errors++;
 kfree_skb(skb);
-sock_put(session->sock);
+sock_put(sock);

 return 0;

 error:
 /* Put UDP header back */
 __skb_push(skb, sizeof(struct udphdr));
+sock_put(sock);

+no_tunnel:
 return 1;
@@ -745,10 +753,13 @@ static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 "%s: received %d bytes\n", tunnel->name, skb->len);

 if (pppol2tp_recv_core(sk, skb))
-goto pass_up;
+goto pass_up_put;

+sock_put(sk);
 return 0;

+pass_up_put:
+sock_put(sk);
 pass_up:
 return 1;
 }

@@ -858,7 +869,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh

 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
 if (tunnel == NULL)
-goto error;
+goto error_put_sess;

 /* What header length is configured for this session? */
 hdr_len = pppol2tp_l2tp_header_len(session);
@@ -870,7 +881,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 sizeof(ppph) + total_len,
 0, GFP_KERNEL);
 if (!skb)
-goto error;
+goto error_put_sess_tun;

 /* Reserve space for headers. */
 skb_reserve(skb, NET_SKB_PAD);
@@ -900,7 +911,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
 if (error < 0) {
 kfree_skb(skb);
-goto error;
+goto error_put_sess_tun;
 }
 skb_put(skb, total_len);

@@ -947,10 +958,33 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 session->stats.tx_errors++;
 }

 return error;

+error_put_sess_tun:
+sock_put(session->tunnel_sock);
+error_put_sess:
+sock_put(sk);
 error:
 return error;
 }

+/* Automatically called when the skb is freed.
+ */
+static void pppol2tp_sock_wfree(struct sk_buff *skb)
+{
+sock_put(skb->sk);
+}
+
+/* For data skbs that we transmit, we associate with the tunnel socket
+ * but don't do accounting.
+ */
+static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+sock_hold(sk);
+skb->sk = sk;
+skb->destructor = pppol2tp_sock_wfree;
+}
+
 /* Transmit function called by generic PPP driver. Sends PPP frame
 * over PPPoL2TP socket.
 *

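The new pppol2tp_skb_set_owner_w() ties the buffer's lifetime to the tunnel socket: the sock_hold() taken here is only released by the destructor when the skb is finally freed, wherever in the stack that happens. A small runnable sketch of that callback-owns-the-reference idea (plain C, illustrative names):

```c
#include <stdio.h>
#include <stdlib.h>

struct sock_like { int refcnt; };

struct buf {
	struct sock_like *owner;
	void (*destructor)(struct buf *);
};

static void hold(struct sock_like *s) { s->refcnt++; }

static void put(struct sock_like *s)
{
	if (--s->refcnt == 0) {
		printf("owner freed\n");
		free(s);
	}
}

/* Runs when the buffer is freed, like skb->destructor. */
static void buf_wfree(struct buf *b) { put(b->owner); }

static void buf_set_owner(struct buf *b, struct sock_like *s)
{
	hold(s);                 /* the reference travels with the buffer */
	b->owner = s;
	b->destructor = buf_wfree;
}

static void buf_free(struct buf *b)
{
	if (b->destructor)
		b->destructor(b);  /* reference released exactly once, here */
	free(b);
}

int main(void)
{
	struct sock_like *s = malloc(sizeof(*s));
	struct buf *b = calloc(1, sizeof(*b));
	s->refcnt = 1;
	buf_set_owner(b, s);
	put(s);       /* original owner drops its reference... */
	buf_free(b);  /* ...object survives until the buffer is freed */
	return 0;
}
```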
@@ -993,10 +1027,10 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)

 sk_tun = session->tunnel_sock;
 if (sk_tun == NULL)
-goto abort;
+goto abort_put_sess;
 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
 if (tunnel == NULL)
-goto abort;
+goto abort_put_sess;

 /* What header length is configured for this session? */
 hdr_len = pppol2tp_l2tp_header_len(session);
@@ -1009,7 +1043,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 sizeof(struct udphdr) + hdr_len + sizeof(ppph);
 old_headroom = skb_headroom(skb);
 if (skb_cow_head(skb, headroom))
-goto abort;
+goto abort_put_sess_tun;

 new_headroom = skb_headroom(skb);
 skb_orphan(skb);
@@ -1069,7 +1103,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 /* Get routing info from the tunnel socket */
 dst_release(skb->dst);
 skb->dst = dst_clone(__sk_dst_get(sk_tun));
-skb->sk = sk_tun;
+pppol2tp_skb_set_owner_w(skb, sk_tun);

 /* Queue the packet to IP for output */
 len = skb->len;
@@ -1086,8 +1120,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 session->stats.tx_errors++;
 }

+sock_put(sk_tun);
+sock_put(sk);
 return 1;

+abort_put_sess_tun:
+sock_put(sk_tun);
+abort_put_sess:
+sock_put(sk);
 abort:
 /* Free the original skb */
 kfree_skb(skb);

@@ -1191,7 +1231,7 @@ static void pppol2tp_tunnel_destruct(struct sock *sk)
 {
 struct pppol2tp_tunnel *tunnel;

-tunnel = pppol2tp_sock_to_tunnel(sk);
+tunnel = sk->sk_user_data;
 if (tunnel == NULL)
 goto end;

@@ -1230,10 +1270,12 @@ static void pppol2tp_session_destruct(struct sock *sk)
 if (sk->sk_user_data != NULL) {
 struct pppol2tp_tunnel *tunnel;

-session = pppol2tp_sock_to_session(sk);
+session = sk->sk_user_data;
 if (session == NULL)
 goto out;

+BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
 /* Don't use pppol2tp_sock_to_tunnel() here to
 * get the tunnel context because the tunnel
 * socket might have already been closed (its
@@ -1279,6 +1321,7 @@ out:
 static int pppol2tp_release(struct socket *sock)
 {
 struct sock *sk = sock->sk;
+struct pppol2tp_session *session;
 int error;

 if (!sk)
@@ -1296,9 +1339,18 @@ static int pppol2tp_release(struct socket *sock)
 sock_orphan(sk);
 sock->sk = NULL;

+session = pppol2tp_sock_to_session(sk);
+
 /* Purge any queued data */
 skb_queue_purge(&sk->sk_receive_queue);
 skb_queue_purge(&sk->sk_write_queue);
+if (session != NULL) {
+struct sk_buff *skb;
+while ((skb = skb_dequeue(&session->reorder_q))) {
+kfree_skb(skb);
+sock_put(sk);
+}
+}

 release_sock(sk);

@@ -1601,7 +1653,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,

 error = ppp_register_channel(&po->chan);
 if (error)
-goto end;
+goto end_put_tun;

 /* This is how we get the session context from the socket. */
 sk->sk_user_data = session;
@@ -1621,6 +1673,8 @@ out_no_ppp:
 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
 "%s: created\n", session->name);

+end_put_tun:
+sock_put(tunnel_sock);
 end:
 release_sock(sk);

@@ -1668,6 +1722,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
 *usockaddr_len = len;

 error = 0;
+sock_put(sock->sk);

 end:
 return error;

@@ -1906,14 +1961,17 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
 err = -EBADF;
 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
 if (tunnel == NULL)
-goto end;
+goto end_put_sess;

 err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
-goto end;
+sock_put(session->tunnel_sock);
+goto end_put_sess;
 }

 err = pppol2tp_session_ioctl(session, cmd, arg);

+end_put_sess:
+sock_put(sk);
 end:
 return err;
 }
@@ -2059,14 +2117,17 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
 err = -EBADF;
 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
 if (tunnel == NULL)
-goto end;
+goto end_put_sess;

 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
+sock_put(session->tunnel_sock);
 } else
 err = pppol2tp_session_setsockopt(sk, session, optname, val);

 err = 0;

+end_put_sess:
+sock_put(sk);
 end:
 return err;
 }
@@ -2181,20 +2242,24 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
 err = -EBADF;
 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
 if (tunnel == NULL)
-goto end;
+goto end_put_sess;

 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
+sock_put(session->tunnel_sock);
 } else
 err = pppol2tp_session_getsockopt(sk, session, optname, &val);

 err = -EFAULT;
 if (put_user(len, (int __user *) optlen))
-goto end;
+goto end_put_sess;

 if (copy_to_user((void __user *) optval, &val, len))
-goto end;
+goto end_put_sess;

 err = 0;

+end_put_sess:
+sock_put(sk);
 end:
 return err;
 }

@@ -972,7 +972,7 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);

 len = skb->len;
-if (unlikely(len < ETH_ZLEN)) {
+if (len < ETH_ZLEN) {
 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
 0, ETH_ZLEN - len);
 len = ETH_ZLEN;

@@ -459,7 +459,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
 tries--;
 }

-EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
+EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
 max_tries);
 return 0;
 }

@@ -111,7 +111,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne
 struct hme_tx_logent *tlp;
 unsigned long flags;

-save_and_cli(flags);
+local_irq_save(flags);
 tlp = &tx_log[txlog_cur_entry];
 tlp->tstamp = (unsigned int)jiffies;
 tlp->tx_new = hp->tx_new;
@@ -119,7 +119,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne
 tlp->action = a;
 tlp->status = s;
 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
-restore_flags(flags);
+local_irq_restore(flags);
 }
 static __inline__ void tx_dump_log(void)
 {

@@ -1729,12 +1729,15 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
 if (!dev)
 return -EINVAL;

-if (netif_running(dev))
-tulip_down(dev);
+if (!netif_running(dev))
+goto save_state;
+
+tulip_down(dev);

 netif_device_detach(dev);
 free_irq(dev->irq, dev);

+save_state:
 pci_save_state(pdev);
 pci_disable_device(pdev);
 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1754,6 +1757,9 @@ static int tulip_resume(struct pci_dev *pdev)
 pci_set_power_state(pdev, PCI_D0);
 pci_restore_state(pdev);

+if (!netif_running(dev))
+return 0;
+
 if ((retval = pci_enable_device(pdev))) {
 printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
 return retval;

@@ -73,6 +73,7 @@ static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
 "tx-frames-ok",
 "tx-excessive-differ-frames",
 "tx-256-511-frames",
+"tx-512-1023-frames",
 "tx-1024-1518-frames",
 "tx-jumbo-frames",
 };
@@ -308,7 +309,7 @@ static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
 }
 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
-memcpy(buf, tx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
+memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
 ETH_GSTRING_LEN);
 }

@@ -47,6 +47,9 @@ struct virtnet_info
 /* Number of input buffers, and max we've ever had. */
 unsigned int num, max;

+/* For cleaning up after transmission. */
+struct tasklet_struct tasklet;
+
 /* Receive & send queues. */
 struct sk_buff_head recv;
 struct sk_buff_head send;
@@ -68,8 +71,13 @@ static void skb_xmit_done(struct virtqueue *svq)

 /* Suppress further interrupts. */
 svq->vq_ops->disable_cb(svq);
+
 /* We were waiting for more output buffers. */
 netif_wake_queue(vi->dev);
+
+/* Make sure we re-xmit last_xmit_skb: if there are no more packets
+ * queued, start_xmit won't be called. */
+tasklet_schedule(&vi->tasklet);
 }

 static void receive_skb(struct net_device *dev, struct sk_buff *skb,
@@ -278,6 +286,18 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
 }

+static void xmit_tasklet(unsigned long data)
+{
+struct virtnet_info *vi = (void *)data;
+
+netif_tx_lock_bh(vi->dev);
+if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
+vi->svq->vq_ops->kick(vi->svq);
+vi->last_xmit_skb = NULL;
+}
+netif_tx_unlock_bh(vi->dev);
+}
+
 static int start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 struct virtnet_info *vi = netdev_priv(dev);
@@ -287,21 +307,25 @@ again:
 free_old_xmit_skbs(vi);

 /* If we has a buffer left over from last time, send it now. */
-if (vi->last_xmit_skb) {
+if (unlikely(vi->last_xmit_skb)) {
 if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
 /* Drop this skb: we only queue one. */
 vi->dev->stats.tx_dropped++;
 kfree_skb(skb);
+skb = NULL;
 goto stop_queue;
 }
 vi->last_xmit_skb = NULL;
 }

 /* Put new one in send queue and do transmit */
-__skb_queue_head(&vi->send, skb);
-if (xmit_skb(vi, skb) != 0) {
-vi->last_xmit_skb = skb;
-goto stop_queue;
+if (likely(skb)) {
+__skb_queue_head(&vi->send, skb);
+if (xmit_skb(vi, skb) != 0) {
+vi->last_xmit_skb = skb;
+skb = NULL;
+goto stop_queue;
+}
 }
+done:
 vi->svq->vq_ops->kick(vi->svq);
@@ -428,6 +452,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 skb_queue_head_init(&vi->recv);
 skb_queue_head_init(&vi->send);

+tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
+
 err = register_netdev(dev);
 if (err) {
 pr_debug("virtio_net: registering device failed\n");

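The virtio_net change parks an skb that would not fit in the ring (last_xmit_skb) and schedules a tasklet so the parked packet is retried from the completion path even if no new start_xmit call ever arrives. A toy, single-threaded model of that park-and-retry-from-completion shape (not the virtio API; names and capacities are invented):

```c
#include <stdio.h>

#define QCAP 2
static int queue[QCAP], qlen;
static int parked, have_parked;   /* analogue of vi->last_xmit_skb */

static int try_enqueue(int pkt)
{
	if (qlen == QCAP)
		return -1;            /* ring full, like add_buf() failing */
	queue[qlen++] = pkt;
	return 0;
}

/* start_xmit analogue: park the packet instead of dropping it. */
static void xmit(int pkt)
{
	if (have_parked || try_enqueue(pkt) != 0) {
		parked = pkt;
		have_parked = 1;
		printf("parked %d, queue full\n", pkt);
	}
}

/* Completion-handler analogue of xmit_tasklet(): a slot has been
 * freed, so retry the parked packet first. */
static void on_complete(void)
{
	qlen--;                       /* one slot drained */
	if (have_parked && try_enqueue(parked) == 0) {
		printf("retried %d from completion\n", parked);
		have_parked = 0;
	}
}

int main(void)
{
	xmit(1); xmit(2); xmit(3);    /* 3 is parked */
	on_complete();                /* 3 goes out now */
	return 0;
}
```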
@@ -2905,7 +2905,7 @@ EXPORT_SYMBOL(init_airo_card);

 static int waitbusy (struct airo_info *ai) {
 int delay = 0;
-while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
+while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
 udelay (10);
 if ((++delay % 20) == 0)
 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);

@@ -3039,7 +3039,6 @@ static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev)
 /* Locking: wl->mutex */
 static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev)
 {
 struct b43legacy_wl *wl = dev->wl;
-struct b43legacy_phy *phy = &dev->phy;
 u32 macctl;

@@ -3054,12 +3053,6 @@ static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev)
 macctl |= B43legacy_MACCTL_PSM_JMP0;
 b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);

-mutex_unlock(&wl->mutex);
-/* Must unlock as it would otherwise deadlock. No races here.
- * Cancel possibly pending workqueues. */
-cancel_work_sync(&dev->restart_work);
-mutex_lock(&wl->mutex);
-
 b43legacy_leds_exit(dev);
 b43legacy_rng_exit(dev->wl);
 b43legacy_pio_free(dev);
@@ -3486,6 +3479,8 @@ static void b43legacy_chip_reset(struct work_struct *work)
 }
 }
 out:
+if (err)
+wl->current_dev = NULL; /* Failed to init the dev. */
 mutex_unlock(&wl->mutex);
 if (err)
 b43legacyerr(wl, "Controller restart FAILED\n");
@@ -3618,9 +3613,11 @@ static void b43legacy_one_core_detach(struct ssb_device *dev)
 struct b43legacy_wldev *wldev;
 struct b43legacy_wl *wl;

+/* Do not cancel ieee80211-workqueue based work here.
+ * See comment in b43legacy_remove(). */
+
 wldev = ssb_get_drvdata(dev);
 wl = wldev->wl;
-cancel_work_sync(&wldev->restart_work);
 b43legacy_debugfs_remove_device(wldev);
 b43legacy_wireless_core_detach(wldev);
 list_del(&wldev->list);
@@ -3789,6 +3786,10 @@ static void b43legacy_remove(struct ssb_device *dev)
 struct b43legacy_wl *wl = ssb_get_devtypedata(dev);
 struct b43legacy_wldev *wldev = ssb_get_drvdata(dev);

+/* We must cancel any work here before unregistering from ieee80211,
+ * as the ieee80211 unreg will destroy the workqueue. */
+cancel_work_sync(&wldev->restart_work);
+
 B43legacy_WARN_ON(!wl);
 if (wl->current_dev == wldev)
 ieee80211_unregister_hw(wl->hw);

@@ -7558,8 +7558,31 @@ static int ipw_associate(void *data)
 priv->ieee->iw_mode == IW_MODE_ADHOC &&
 priv->config & CFG_ADHOC_CREATE &&
 priv->config & CFG_STATIC_ESSID &&
-priv->config & CFG_STATIC_CHANNEL &&
-!list_empty(&priv->ieee->network_free_list)) {
+priv->config & CFG_STATIC_CHANNEL) {
+/* Use oldest network if the free list is empty */
+if (list_empty(&priv->ieee->network_free_list)) {
+struct ieee80211_network *oldest = NULL;
+struct ieee80211_network *target;
+DECLARE_MAC_BUF(mac);
+
+list_for_each_entry(target, &priv->ieee->network_list, list) {
+if ((oldest == NULL) ||
+(target->last_scanned < oldest->last_scanned))
+oldest = target;
+}
+
+/* If there are no more slots, expire the oldest */
+list_del(&oldest->list);
+target = oldest;
+IPW_DEBUG_ASSOC("Expired '%s' (%s) from "
+"network list.\n",
+escape_essid(target->ssid,
+target->ssid_len),
+print_mac(mac, target->bssid));
+list_add_tail(&target->list,
+&priv->ieee->network_free_list);
+}
+
 element = priv->ieee->network_free_list.next;
 network = list_entry(element, struct ieee80211_network, list);
 ipw_adhoc_create(priv, network);

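The eviction logic added above is a plain minimum-scan: walk every known BSS, keep the one with the smallest last_scanned timestamp, and recycle it. The same scan, reduced to a runnable sketch (array instead of a kernel list, illustrative names):

```c
#include <stdio.h>

struct network {
	const char *ssid;
	unsigned long last_scanned;   /* older == smaller */
};

/* Same scan as the hunk above: walk every entry and keep the one
 * with the smallest timestamp, then reuse ("expire") it. */
static struct network *pick_oldest(struct network *list, int n)
{
	struct network *oldest = NULL;
	for (int i = 0; i < n; i++)
		if (oldest == NULL || list[i].last_scanned < oldest->last_scanned)
			oldest = &list[i];
	return oldest;
}

int main(void)
{
	struct network nets[] = {
		{ "alpha", 300 }, { "beta", 120 }, { "gamma", 480 },
	};
	printf("expire '%s'\n", pick_oldest(nets, 3)->ssid); /* beta */
	return 0;
}
```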
@@ -312,8 +312,8 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask,
 if (tlv_type != TLV_TYPE_BCNMISS)
 tlv->freq = freq;

-/* The command header, the event mask, and the one TLV */
-events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 2 + sizeof(*tlv));
+/* The command header, the action, the event mask, and one TLV */
+events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv));

 ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events);

@@ -49,6 +49,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */

 /* Version 2 devices (3887) */
+{USB_DEVICE(0x0471, 0x1230)}, /* Philips CPWUA054/00 */
 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */

@@ -537,12 +537,12 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
 int err = 0;
 u32 tmp;

-might_sleep();
-
 if (!pdev)
 goto out;
 bus = pdev->bus;

+might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);
+
 /* Enable interrupts for this device. */
 if (bus->host_pci &&
 ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {

@@ -10,19 +10,19 @@
 #define RTCF_NOPMTUDISC RTM_F_NOPMTUDISC

 #define RTCF_NOTIFY 0x00010000
-#define RTCF_DIRECTDST 0x00020000
+#define RTCF_DIRECTDST 0x00020000 /* unused */
 #define RTCF_REDIRECTED 0x00040000
-#define RTCF_TPROXY 0x00080000
+#define RTCF_TPROXY 0x00080000 /* unused */

-#define RTCF_FAST 0x00200000
-#define RTCF_MASQ 0x00400000
-#define RTCF_SNAT 0x00800000
+#define RTCF_FAST 0x00200000 /* unused */
+#define RTCF_MASQ 0x00400000 /* unused */
+#define RTCF_SNAT 0x00800000 /* unused */
 #define RTCF_DOREDIRECT 0x01000000
 #define RTCF_DIRECTSRC 0x04000000
 #define RTCF_DNAT 0x08000000
 #define RTCF_BROADCAST 0x10000000
 #define RTCF_MULTICAST 0x20000000
-#define RTCF_REJECT 0x40000000
+#define RTCF_REJECT 0x40000000 /* unused */
 #define RTCF_LOCAL 0x80000000

 #define RTCF_NAT (RTCF_DNAT|RTCF_SNAT)

@@ -117,7 +117,6 @@ struct in_ifaddr
 __be32 ifa_address;
 __be32 ifa_mask;
 __be32 ifa_broadcast;
-__be32 ifa_anycast;
 unsigned char ifa_scope;
 unsigned char ifa_flags;
 unsigned char ifa_prefixlen;

@@ -267,10 +267,10 @@ enum rtattr_type_t
 RTA_PREFSRC,
 RTA_METRICS,
 RTA_MULTIPATH,
-RTA_PROTOINFO,
+RTA_PROTOINFO, /* no longer used */
 RTA_FLOW,
 RTA_CACHEINFO,
-RTA_SESSION,
+RTA_SESSION, /* no longer used */
 RTA_MP_ALGO, /* no longer used */
 RTA_TABLE,
 __RTA_MAX

@@ -94,6 +94,28 @@ extern void addrconf_join_solict(struct net_device *dev,
 extern void addrconf_leave_solict(struct inet6_dev *idev,
 struct in6_addr *addr);

+static inline unsigned long addrconf_timeout_fixup(u32 timeout,
+unsigned unit)
+{
+if (timeout == 0xffffffff)
+return ~0UL;
+
+/*
+ * Avoid arithmetic overflow.
+ * Assuming unit is constant and non-zero, this "if" statement
+ * will go away on 64bit archs.
+ */
+if (0xfffffffe > LONG_MAX / unit && timeout > LONG_MAX / unit)
+return LONG_MAX / unit;
+
+return timeout;
+}
+
+static inline int addrconf_finite_timeout(unsigned long timeout)
+{
+return ~timeout;
+}
+
 /*
 * IPv6 Address Label subsystem (addrlabel.c)
 */

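The two helpers added here centralize the "0xffffffff means infinite, everything else must not overflow when multiplied by HZ" convention used by several of the IPv6 hunks further down. A userspace rendering of the same arithmetic, runnable as-is (only the function names come from the hunk; the driver code is a made-up demo):

```c
#include <limits.h>
#include <stdio.h>

/* 0xffffffff maps to ~0UL ("infinite"); finite values are clamped
 * so that a later "timeout * unit" cannot overflow a long. */
static unsigned long timeout_fixup(unsigned int timeout, unsigned unit)
{
	if (timeout == 0xffffffffU)
		return ~0UL;

	/* On 64-bit longs the first comparison is false and the
	 * whole check compiles away, exactly as the comment says. */
	if (0xfffffffeUL > LONG_MAX / unit && timeout > LONG_MAX / unit)
		return LONG_MAX / unit;

	return timeout;
}

static int finite_timeout(unsigned long timeout)
{
	return ~timeout != 0;   /* only ~0UL means "infinite" */
}

int main(void)
{
	/* infinite lifetime: not finite */
	printf("%d\n", finite_timeout(timeout_fixup(0xffffffffU, 100)));
	/* large finite lifetime: unchanged on 64-bit, clamped on 32-bit */
	printf("%lu\n", timeout_fixup(0xfffffffeU, 100));
	return 0;
}
```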
@@ -162,9 +162,9 @@ static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
 * @skb: socket buffer the message is stored in
 * @hdr: generic netlink message header
 */
-static inline int genlmsg_cancel(struct sk_buff *skb, void *hdr)
+static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
 {
-return nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
 }

 /**

@@ -556,14 +556,12 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
 * @skb: socket buffer the message is stored in
 * @mark: mark to trim to
 *
-* Trims the message to the provided mark. Returns -1.
+* Trims the message to the provided mark.
 */
-static inline int nlmsg_trim(struct sk_buff *skb, const void *mark)
+static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
 {
 if (mark)
 skb_trim(skb, (unsigned char *) mark - skb->data);
-
-return -1;
 }

 /**
@@ -572,11 +570,11 @@ static inline int nlmsg_trim(struct sk_buff *skb, const void *mark)
 * @nlh: netlink message header
 *
 * Removes the complete netlink message including all
-* attributes from the socket buffer again. Returns -1.
+* attributes from the socket buffer again.
 */
-static inline int nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
+static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-return nlmsg_trim(skb, nlh);
+nlmsg_trim(skb, nlh);
 }

 /**
@@ -775,7 +773,7 @@ static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
 int nested_len = nla_len(nla) - NLA_ALIGN(len);

 if (nested_len < 0)
-return -1;
+return -EINVAL;
 if (nested_len >= nla_attr_size(0))
 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
 nested_len, policy);
@@ -1080,11 +1078,11 @@ static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start)
 * @start: container attribute
 *
 * Removes the container attribute and including all nested
-* attributes. Returns -1.
+* attributes. Returns -EMSGSIZE
 */
-static inline int nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
+static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
 {
-return nlmsg_trim(skb, start);
+nlmsg_trim(skb, start);
 }

 /**

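Making the cancel helpers return void forces every fill function to pick its own error code instead of propagating the old magic -1, which is exactly what the net/core hunks later in this merge do (cancel, then return -EMSGSIZE). A runnable toy showing the caller-side shape of that API change (all names invented; -90 stands in for Linux's EMSGSIZE):

```c
#include <stdio.h>

struct msg { int len; };

/* void "cancel": rewinds the buffer, reports nothing. */
static void cancel(struct msg *m, int mark) { m->len = mark; }

/* Old style returned cancel()'s -1 directly; new style calls the
 * void cancel and then returns the real error code explicitly. */
static int fill(struct msg *m, int fits)
{
	int mark = m->len;
	m->len += 8;                /* pretend to append an attribute */
	if (!fits) {
		cancel(m, mark);    /* undo the partial output */
		return -90;         /* -EMSGSIZE in the kernel */
	}
	return 0;
}

int main(void)
{
	struct msg m = { 0 };
	printf("%d len=%d\n", fill(&m, 0), m.len);  /* -90 len=0 */
	printf("%d len=%d\n", fill(&m, 1), m.len);  /* 0 len=8 */
	return 0;
}
```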
@@ -548,7 +548,8 @@ struct sctp_af {
 struct dst_entry *(*get_dst) (struct sctp_association *asoc,
 union sctp_addr *daddr,
 union sctp_addr *saddr);
-void (*get_saddr) (struct sctp_association *asoc,
+void (*get_saddr) (struct sctp_sock *sk,
+struct sctp_association *asoc,
 struct dst_entry *dst,
 union sctp_addr *daddr,
 union sctp_addr *saddr);
@@ -587,6 +588,7 @@ struct sctp_af {
 int (*is_ce) (const struct sk_buff *sk);
 void (*seq_dump_addr)(struct seq_file *seq,
 union sctp_addr *addr);
+void (*ecn_capable)(struct sock *sk);
 __u16 net_header_len;
 int sockaddr_len;
 sa_family_t sa_family;
@@ -901,7 +903,10 @@ struct sctp_transport {
 * calculation completes (i.e. the DATA chunk
 * is SACK'd) clear this flag.
 */
-int rto_pending;
+__u8 rto_pending;
+
+/* Flag to track the current fast recovery state */
+__u8 fast_recovery;

 /*
 * These are the congestion stats.
@@ -920,6 +925,9 @@ struct sctp_transport {
 /* Data that has been sent, but not acknowledged. */
 __u32 flight_size;

+/* TSN marking the fast recovery exit point */
+__u32 fast_recovery_exit;
+
 /* Destination */
 struct dst_entry *dst;
 /* Source address. */
@@ -1044,7 +1052,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
 struct sctp_sock *);
 void sctp_transport_pmtu(struct sctp_transport *);
 void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_timers(struct sctp_transport *, int);
 void sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1134,6 +1142,9 @@ struct sctp_outq {
 /* How many unackd bytes do we have in-flight? */
 __u32 outstanding_bytes;

+/* Are we doing fast-rtx on this queue */
+char fast_rtx;
+
 /* Corked? */
 char cork;

@@ -40,7 +40,8 @@ extern int datagram_recv_ctl(struct sock *sk,
 struct msghdr *msg,
 struct sk_buff *skb);

-extern int datagram_send_ctl(struct msghdr *msg,
+extern int datagram_send_ctl(struct net *net,
+struct msghdr *msg,
 struct flowi *fl,
 struct ipv6_txoptions *opt,
 int *hlimit, int *tclass);

@@ -135,6 +135,7 @@ extern void udp_err(struct sk_buff *, u32);

 extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
 struct msghdr *msg, size_t len);
+extern void udp_flush_pending_frames(struct sock *sk);

 extern int udp_rcv(struct sk_buff *skb);
 extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);

@@ -64,20 +64,15 @@ void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)

 void ax25_requeue_frames(ax25_cb *ax25)
 {
-struct sk_buff *skb, *skb_prev = NULL;
+struct sk_buff *skb;

 /*
 * Requeue all the un-ack-ed frames on the output queue to be picked
 * up by ax25_kick called from the timer. This arrangement handles the
 * possibility of an empty output queue.
 */
-while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) {
-if (skb_prev == NULL)
-skb_queue_head(&ax25->write_queue, skb);
-else
-skb_append(skb_prev, skb, &ax25->write_queue);
-skb_prev = skb;
-}
+while ((skb = skb_dequeue_tail(&ax25->ack_queue)) != NULL)
+skb_queue_head(&ax25->write_queue, skb);
 }

 /*

@@ -566,11 +566,22 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
 if (dlc->state == BT_CLOSED) {
 if (!dev->tty) {
 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
-if (rfcomm_dev_get(dev->id) == NULL)
-return;
+/* Drop DLC lock here to avoid deadlock
+ * 1. rfcomm_dev_get will take rfcomm_dev_lock
+ * but in rfcomm_dev_add there's lock order:
+ * rfcomm_dev_lock -> dlc lock
+ * 2. rfcomm_dev_put will deadlock if it's
+ * the last reference
+ */
+rfcomm_dlc_unlock(dlc);
+if (rfcomm_dev_get(dev->id) == NULL) {
+rfcomm_dlc_lock(dlc);
+return;
+}

 rfcomm_dev_del(dev);
 rfcomm_dev_put(dev);
+rfcomm_dlc_lock(dlc);
 }
 } else
 tty_hangup(dev->tty);

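The fix above is the standard cure for a lock-order inversion: the DLC lock is dropped before calling into a path that takes the device lock, then re-taken afterwards, so the established dev-lock -> dlc-lock order is never violated. A minimal pthread sketch of the same drop-and-reacquire shape (illustrative names, compile with -lpthread):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t dlc_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

/* Established order elsewhere in the code: dev_lock -> dlc_lock. */
static void lookup_dev(void)
{
	pthread_mutex_lock(&dev_lock);
	/* ... table lookup ... */
	pthread_mutex_unlock(&dev_lock);
}

/* Called with dlc_lock held. Taking dev_lock directly here would
 * invert the order and risk deadlock, so drop the inner lock first
 * and re-take it afterwards -- the same shape as the hunk above. */
static void state_change(void)
{
	pthread_mutex_lock(&dlc_lock);

	pthread_mutex_unlock(&dlc_lock);   /* drop inner lock */
	lookup_dev();                      /* safe: respects dev -> dlc */
	pthread_mutex_lock(&dlc_lock);     /* re-acquire; any state read
	                                    * before must be re-checked */

	pthread_mutex_unlock(&dlc_lock);
}

int main(void)
{
	state_change();
	puts("no deadlock");
	return 0;
}
```

The price of the unlock window is that the protected state may have changed in between, which is why the kernel code re-checks via rfcomm_dev_get() after re-locking.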
@@ -1714,7 +1714,8 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
 return nla_nest_end(skb, nest);

 nla_put_failure:
-return nla_nest_cancel(skb, nest);
+nla_nest_cancel(skb, nest);
+return -EMSGSIZE;
 }

 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
@@ -2057,9 +2058,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 goto nla_put_failure;
 }

-ci.ndm_used = now - neigh->used;
-ci.ndm_confirmed = now - neigh->confirmed;
-ci.ndm_updated = now - neigh->updated;
+ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
+ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
+ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
 read_unlock_bh(&neigh->lock);

@@ -498,7 +498,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
 return nla_nest_end(skb, mx);

 nla_put_failure:
-return nla_nest_cancel(skb, mx);
+nla_nest_cancel(skb, mx);
+return -EMSGSIZE;
 }

 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,

@@ -1445,6 +1445,7 @@ done:

 if (spd.nr_pages) {
 int ret;
+struct sock *sk = __skb->sk;

 /*
 * Drop the socket lock, otherwise we have reverse
@@ -1455,9 +1456,9 @@ done:
 * we call into ->sendpage() with the i_mutex lock held
 * and networking will grab the socket lock.
 */
-release_sock(__skb->sk);
+release_sock(sk);
 ret = splice_to_pipe(pipe, &spd);
-lock_sock(__skb->sk);
+lock_sock(sk);
 return ret;
 }

@@ -75,7 +75,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,

 end = start + skb_shinfo(skb)->frags[i].size;
-if ((copy = end - offset) > 0) {
+copy = end - offset;
+if (copy > 0) {
 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 struct page *page = frag->page;

@@ -90,7 +90,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
 [IFA_LOCAL] = { .type = NLA_U32 },
 [IFA_ADDRESS] = { .type = NLA_U32 },
 [IFA_BROADCAST] = { .type = NLA_U32 },
-[IFA_ANYCAST] = { .type = NLA_U32 },
 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };

@@ -536,9 +535,6 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
 if (tb[IFA_BROADCAST])
 ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);

-if (tb[IFA_ANYCAST])
-ifa->ifa_anycast = nla_get_be32(tb[IFA_ANYCAST]);
-
 if (tb[IFA_LABEL])
 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
 else
@@ -745,7 +741,6 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 break;
 inet_del_ifa(in_dev, ifap, 0);
 ifa->ifa_broadcast = 0;
-ifa->ifa_anycast = 0;
 ifa->ifa_scope = 0;
 }

@@ -1113,7 +1108,6 @@ static inline size_t inet_nlmsg_size(void)
 + nla_total_size(4) /* IFA_ADDRESS */
 + nla_total_size(4) /* IFA_LOCAL */
 + nla_total_size(4) /* IFA_BROADCAST */
-+ nla_total_size(4) /* IFA_ANYCAST */
 + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
 }

@@ -1143,9 +1137,6 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
 if (ifa->ifa_broadcast)
 NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);

-if (ifa->ifa_anycast)
-NLA_PUT_BE32(skb, IFA_ANYCAST, ifa->ifa_anycast);
-
 if (ifa->ifa_label[0])
 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);

@@ -506,7 +506,6 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = {
 [RTA_PREFSRC] = { .type = NLA_U32 },
 [RTA_METRICS] = { .type = NLA_NESTED },
 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
-[RTA_PROTOINFO] = { .type = NLA_U32 },
 [RTA_FLOW] = { .type = NLA_U32 },
 };

@@ -608,6 +608,14 @@ static void raw_close(struct sock *sk, long timeout)
 sk_common_release(sk);
 }

+static int raw_destroy(struct sock *sk)
+{
+lock_sock(sk);
+ip_flush_pending_frames(sk);
+release_sock(sk);
+return 0;
+}
+
 /* This gets rid of all the nasties in af_inet. -DaveM */
 static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -820,6 +828,7 @@ struct proto raw_prot = {
 .name = "RAW",
 .owner = THIS_MODULE,
 .close = raw_close,
+.destroy = raw_destroy,
 .connect = ip4_datagram_connect,
 .disconnect = udp_disconnect,
 .ioctl = raw_ioctl,

@@ -1792,7 +1792,7 @@ static int __mkroute_input(struct sk_buff *skb,
 if (err)
 flags |= RTCF_DIRECTSRC;

-if (out_dev == in_dev && err && !(flags & RTCF_MASQ) &&
+if (out_dev == in_dev && err &&
 (IN_DEV_SHARED_MEDIA(out_dev) ||
 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
 flags |= RTCF_DOREDIRECT;

@@ -1227,7 +1227,14 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 copied += used;
 offset += used;
 }
-if (offset != skb->len)
+/*
+ * If recv_actor drops the lock (e.g. TCP splice
+ * receive) the skb pointer might be invalid when
+ * getting here: tcp_collapse might have deleted it
+ * while aggregating skbs from the socket queue.
+ */
+skb = tcp_recv_skb(sk, seq-1, &offset);
+if (!skb || (offset+1 != skb->len))
 break;
 }
 if (tcp_hdr(skb)->fin) {

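The splice-race fix above follows a general rule: a cached pointer into a collection is not trustworthy after a callback that may have reorganized the collection, so re-derive it from a stable key (here the sequence number) instead. A toy, runnable model of re-lookup-after-callback (arrays instead of skb queues; everything here is invented for illustration):

```c
#include <stdio.h>
#include <string.h>

struct seg { unsigned seq; unsigned len; };

static struct seg segs[4];
static int nsegs;

/* Look a segment up by the stable key (sequence number). */
static struct seg *find_seg(unsigned seq)
{
	for (int i = 0; i < nsegs; i++)
		if (seq >= segs[i].seq && seq < segs[i].seq + segs[i].len)
			return &segs[i];
	return NULL;
}

/* Callback that, like a splice receive, may collapse segments. */
static void recv_actor(void)
{
	segs[0].len += segs[1].len;             /* merge 0 and 1 */
	memmove(&segs[1], &segs[2], sizeof(struct seg) * 2);
	nsegs--;
}

int main(void)
{
	segs[0] = (struct seg){ 0, 100 };
	segs[1] = (struct seg){ 100, 50 };
	segs[2] = (struct seg){ 150, 50 };
	nsegs = 3;

	unsigned seq = 150;                     /* consumed up to here */
	recv_actor();                           /* old pointers now stale */
	struct seg *s = find_seg(seq - 1);      /* re-lookup, as the fix does */
	printf("seq %u lives in seg starting at %u\n", seq - 1, s->seq);
	return 0;
}
```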
@@ -1392,9 +1392,9 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,

 if (before(next_dup->start_seq, skip_to_seq)) {
 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
-tcp_sacktag_walk(skb, sk, NULL,
-next_dup->start_seq, next_dup->end_seq,
-1, fack_count, reord, flag);
+skb = tcp_sacktag_walk(skb, sk, NULL,
+next_dup->start_seq, next_dup->end_seq,
+1, fack_count, reord, flag);
 }

 return skb;
@@ -2483,6 +2483,20 @@ static inline void tcp_complete_cwr(struct sock *sk)
 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }

+static void tcp_try_keep_open(struct sock *sk)
+{
+struct tcp_sock *tp = tcp_sk(sk);
+int state = TCP_CA_Open;
+
+if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+state = TCP_CA_Disorder;
+
+if (inet_csk(sk)->icsk_ca_state != state) {
+tcp_set_ca_state(sk, state);
+tp->high_seq = tp->snd_nxt;
+}
+}
+
 static void tcp_try_to_open(struct sock *sk, int flag)
 {
 struct tcp_sock *tp = tcp_sk(sk);
@@ -2496,15 +2510,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 tcp_enter_cwr(sk, 1);

 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
-int state = TCP_CA_Open;
-
-if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
-state = TCP_CA_Disorder;
-
-if (inet_csk(sk)->icsk_ca_state != state) {
-tcp_set_ca_state(sk, state);
-tp->high_seq = tp->snd_nxt;
-}
+tcp_try_keep_open(sk);
 tcp_moderate_cwnd(tp);
 } else {
 tcp_cwnd_down(sk, flag);
@@ -3310,8 +3316,11 @@ no_queue:
 return 1;

 old_ack:
-if (TCP_SKB_CB(skb)->sacked)
+if (TCP_SKB_CB(skb)->sacked) {
 tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+if (icsk->icsk_ca_state == TCP_CA_Open)
+tcp_try_keep_open(sk);
+}

 uninteresting_ack:
 SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);

@@ -2131,6 +2131,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 TCP_SKB_CB(skb)->when = tcp_time_stamp;
 if (tcp_transmit_skb(sk, skb, 0, priority))
 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
+
+TCP_INC_STATS(TCP_MIB_OUTRSTS);
 }

 /* WARNING: This routine must only be called when we have already sent

@@ -97,7 +97,7 @@ static int tunnel64_rcv(struct sk_buff *skb)
 {
 struct xfrm_tunnel *handler;

-if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 goto drop;

 for (handler = tunnel64_handlers; handler; handler = handler->next)

@@ -420,7 +420,7 @@ void udp_err(struct sk_buff *skb, u32 info)
 /*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
-static void udp_flush_pending_frames(struct sock *sk)
+void udp_flush_pending_frames(struct sock *sk)
 {
 struct udp_sock *up = udp_sk(sk);

@@ -430,6 +430,7 @@ static void udp_flush_pending_frames(struct sock *sk)
 ip_flush_pending_frames(sk);
 }
 }
+EXPORT_SYMBOL(udp_flush_pending_frames);

 /**
 * udp4_hwcsum_outgoing - handle outgoing HW checksumming

@@ -731,8 +731,13 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 onlink = -1;

 spin_lock(&ifa->lock);
-lifetime = min_t(unsigned long,
-ifa->valid_lft, 0x7fffffffUL/HZ);
+lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
+/*
+ * Note: Because this address is
+ * not permanent, lifetime <
+ * LONG_MAX / HZ here.
+ */
 if (time_before(expires,
 ifa->tstamp + lifetime * HZ))
 expires = ifa->tstamp + lifetime * HZ;
@@ -1722,7 +1727,6 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 __u32 valid_lft;
 __u32 prefered_lft;
 int addr_type;
-unsigned long rt_expires;
 struct inet6_dev *in6_dev;

 pinfo = (struct prefix_info *) opt;
@@ -1764,28 +1768,23 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 * 2) Configure prefixes with the auto flag set
 */

-if (valid_lft == INFINITY_LIFE_TIME)
-rt_expires = ~0UL;
-else if (valid_lft >= 0x7FFFFFFF/HZ) {
+if (pinfo->onlink) {
+struct rt6_info *rt;
+unsigned long rt_expires;
+
 /* Avoid arithmetic overflow. Really, we could
 * save rt_expires in seconds, likely valid_lft,
 * but it would require division in fib gc, that it
 * not good.
 */
-rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ);
-} else
-rt_expires = valid_lft * HZ;
+if (HZ > USER_HZ)
+rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
+else
+rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);

-/*
-* We convert this (in jiffies) to clock_t later.
-* Avoid arithmetic overflow there as well.
-* Overflow can happen only if HZ < USER_HZ.
-*/
-if (HZ < USER_HZ && ~rt_expires && rt_expires > 0x7FFFFFFF / USER_HZ)
-rt_expires = 0x7FFFFFFF / USER_HZ;
+if (addrconf_finite_timeout(rt_expires))
+rt_expires *= HZ;

-if (pinfo->onlink) {
-struct rt6_info *rt;
 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL,
 dev->ifindex, 1);

@@ -1794,7 +1793,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 if (valid_lft == 0) {
 ip6_del_rt(rt);
 rt = NULL;
-} else if (~rt_expires) {
+} else if (addrconf_finite_timeout(rt_expires)) {
 /* not infinity */
 rt->rt6i_expires = jiffies + rt_expires;
 rt->rt6i_flags |= RTF_EXPIRES;
@@ -1803,9 +1802,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 rt->rt6i_expires = 0;
 }
 } else if (valid_lft) {
-int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
 clock_t expires = 0;
-if (~rt_expires) {
+int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
+if (addrconf_finite_timeout(rt_expires)) {
 /* not infinity */
 flags |= RTF_EXPIRES;
 expires = jiffies_to_clock_t(rt_expires);

@@ -2027,7 +2026,7 @@ err_exit:
 * Manual configuration of address on an interface
 */
 static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
-int plen, __u8 ifa_flags, __u32 prefered_lft,
+unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
 __u32 valid_lft)
 {
 struct inet6_ifaddr *ifp;
@@ -2036,9 +2035,13 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 int scope;
 u32 flags;
 clock_t expires;
+unsigned long timeout;

 ASSERT_RTNL();

+if (plen > 128)
+return -EINVAL;
+
 /* check the lifetime */
 if (!valid_lft || prefered_lft > valid_lft)
 return -EINVAL;
@@ -2052,22 +2055,23 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,

 scope = ipv6_addr_scope(pfx);

-if (valid_lft == INFINITY_LIFE_TIME) {
-ifa_flags |= IFA_F_PERMANENT;
-flags = 0;
-expires = 0;
-} else {
-if (valid_lft >= 0x7FFFFFFF/HZ)
-valid_lft = 0x7FFFFFFF/HZ;
+timeout = addrconf_timeout_fixup(valid_lft, HZ);
+if (addrconf_finite_timeout(timeout)) {
+expires = jiffies_to_clock_t(timeout * HZ);
+valid_lft = timeout;
 flags = RTF_EXPIRES;
-expires = jiffies_to_clock_t(valid_lft * HZ);
+} else {
+expires = 0;
+flags = 0;
+ifa_flags |= IFA_F_PERMANENT;
 }

-if (prefered_lft == 0)
-ifa_flags |= IFA_F_DEPRECATED;
-else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
-(prefered_lft != INFINITY_LIFE_TIME))
-prefered_lft = 0x7FFFFFFF/HZ;
+timeout = addrconf_timeout_fixup(prefered_lft, HZ);
+if (addrconf_finite_timeout(timeout)) {
+if (timeout == 0)
+ifa_flags |= IFA_F_DEPRECATED;
+prefered_lft = timeout;
+}

 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);

@@ -2095,12 +2099,15 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 }

 static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
-int plen)
+unsigned int plen)
 {
 struct inet6_ifaddr *ifp;
 struct inet6_dev *idev;
 struct net_device *dev;

+if (plen > 128)
+return -EINVAL;
+
 dev = __dev_get_by_index(net, ifindex);
 if (!dev)
 return -ENODEV;
@@ -3169,26 +3176,28 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
 {
 u32 flags;
 clock_t expires;
+unsigned long timeout;

 if (!valid_lft || (prefered_lft > valid_lft))
 return -EINVAL;

-if (valid_lft == INFINITY_LIFE_TIME) {
-ifa_flags |= IFA_F_PERMANENT;
-flags = 0;
-expires = 0;
-} else {
-if (valid_lft >= 0x7FFFFFFF/HZ)
-valid_lft = 0x7FFFFFFF/HZ;
+timeout = addrconf_timeout_fixup(valid_lft, HZ);
+if (addrconf_finite_timeout(timeout)) {
+expires = jiffies_to_clock_t(timeout * HZ);
+valid_lft = timeout;
 flags = RTF_EXPIRES;
-expires = jiffies_to_clock_t(valid_lft * HZ);
+} else {
+expires = 0;
+flags = 0;
+ifa_flags |= IFA_F_PERMANENT;
 }

-if (prefered_lft == 0)
-ifa_flags |= IFA_F_DEPRECATED;
-else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
-(prefered_lft != INFINITY_LIFE_TIME))
-prefered_lft = 0x7FFFFFFF/HZ;
+timeout = addrconf_timeout_fixup(prefered_lft, HZ);
+if (addrconf_finite_timeout(timeout)) {
+if (timeout == 0)
+ifa_flags |= IFA_F_DEPRECATED;
+prefered_lft = timeout;
+}

 spin_lock_bh(&ifp->lock);
 ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;

@@ -496,7 +496,8 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 return 0;
 }

-int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
+int datagram_send_ctl(struct net *net,
+struct msghdr *msg, struct flowi *fl,
 struct ipv6_txoptions *opt,
 int *hlimit, int *tclass)
 {
@@ -509,7 +510,6 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,

 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 int addr_type;
-struct net_device *dev = NULL;

 if (!CMSG_OK(msg, cmsg)) {
 err = -EINVAL;
@@ -522,6 +522,9 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
 switch (cmsg->cmsg_type) {
 case IPV6_PKTINFO:
 case IPV6_2292PKTINFO:
+{
+struct net_device *dev = NULL;
+
 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
 err = -EINVAL;
 goto exit_f;
@@ -535,32 +538,32 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
 fl->oif = src_info->ipi6_ifindex;
 }

-addr_type = ipv6_addr_type(&src_info->ipi6_addr);
+addr_type = __ipv6_addr_type(&src_info->ipi6_addr);

-if (addr_type == IPV6_ADDR_ANY)
-break;
+if (fl->oif) {
+dev = dev_get_by_index(net, fl->oif);
+if (!dev)
+return -ENODEV;
+} else if (addr_type & IPV6_ADDR_LINKLOCAL)
+return -EINVAL;

-if (addr_type & IPV6_ADDR_LINKLOCAL) {
-if (!src_info->ipi6_ifindex)
-return -EINVAL;
-else {
-dev = dev_get_by_index(&init_net, src_info->ipi6_ifindex);
-if (!dev)
-return -ENODEV;
-}
-}
-if (!ipv6_chk_addr(&init_net, &src_info->ipi6_addr,
-dev, 0)) {
-if (dev)
-dev_put(dev);
-err = -EINVAL;
-goto exit_f;
+if (addr_type != IPV6_ADDR_ANY) {
+int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
+if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+strict ? dev : NULL, 0))
+err = -EINVAL;
+else
+ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
 }

 if (dev)
 dev_put(dev);

-ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
+if (err)
+goto exit_f;
+
 break;
+}

 case IPV6_FLOWINFO:
 if (cmsg->cmsg_len < CMSG_LEN(4)) {

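Threading `struct net *net` through datagram_send_ctl() replaces the hard-coded global (`&init_net` in the old code) with the caller's network namespace, so the device and address lookups above happen in the right context. A runnable sketch of the signature-change pattern (names invented for illustration):

```c
#include <stdio.h>

/* Instead of reaching for a global inside the helper, the namespace
 * is passed down explicitly, so per-namespace lookups follow the
 * caller's context. */
struct net { const char *name; };

static struct net init_net = { "init_net" };

/* old: static int send_ctl(void)   -- always used init_net inside */
/* new: */
static int send_ctl(struct net *net)
{
	printf("lookup devices in %s\n", net->name);
	return 0;
}

int main(void)
{
	struct net other = { "netns-a" };
	send_ctl(&init_net);  /* callers now choose the namespace */
	send_ctl(&other);
	return 0;
}
```

The follow-on hunks in ip6_flowlabel.c, ipv6_sockglue.c, raw.c and udp.c are the mechanical caller updates this change requires.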
@@ -354,7 +354,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 msg.msg_control = (void*)(fl->opt+1);
 flowi.oif = 0;

-err = datagram_send_ctl(&msg, &flowi, fl->opt, &junk, &junk);
+err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk);
 if (err)
 goto done;
 err = -EINVAL;

@@ -161,9 +161,17 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 struct ipv6_txoptions *opt;
 struct sk_buff *pktopt;

-if (sk->sk_protocol != IPPROTO_UDP &&
-sk->sk_protocol != IPPROTO_UDPLITE &&
-sk->sk_protocol != IPPROTO_TCP)
+if (sk->sk_type == SOCK_RAW)
+break;
+
+if (sk->sk_protocol == IPPROTO_UDP ||
+sk->sk_protocol == IPPROTO_UDPLITE) {
+struct udp_sock *up = udp_sk(sk);
+if (up->pending == AF_INET6) {
+retv = -EBUSY;
+break;
+}
+} else if (sk->sk_protocol != IPPROTO_TCP)
 break;

 if (sk->sk_state != TCP_ESTABLISHED) {
@@ -416,7 +424,7 @@ sticky_done:
 msg.msg_controllen = optlen;
 msg.msg_control = (void*)(opt+1);

-retv = datagram_send_ctl(&msg, &fl, opt, &junk, &junk);
+retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk);
 if (retv)
 goto done;
 update:
@@ -832,7 +840,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
 len = min_t(unsigned int, len, ipv6_optlen(hdr));
 if (copy_to_user(optval, hdr, len))
 return -EFAULT;
-return ipv6_optlen(hdr);
+return len;
 }

 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
@@ -975,6 +983,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 len = ipv6_getsockopt_sticky(sk, np->opt,
 optname, optval, len);
 release_sock(sk);
+/* check if ipv6_getsockopt_sticky() returns err code */
+if (len < 0)
+return len;
 return put_user(len, optlen);
 }

@@ -209,7 +209,9 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 arg.dst = dst;
 hash = ip6qhashfn(id, src, dst);

+local_bh_disable();
 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+local_bh_enable();
 if (q == NULL)
 goto oom;

@@ -638,10 +640,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 goto ret_orig;
 }

-spin_lock(&fq->q.lock);
+spin_lock_bh(&fq->q.lock);

 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
-spin_unlock(&fq->q.lock);
+spin_unlock_bh(&fq->q.lock);
 pr_debug("Can't insert skb to queue\n");
 fq_put(fq);
 goto ret_orig;
@@ -653,7 +655,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 if (ret_skb == NULL)
 pr_debug("Can't reassemble fragmented packets\n");
 }
-spin_unlock(&fq->q.lock);
+spin_unlock_bh(&fq->q.lock);

 fq_put(fq);
 return ret_skb;

@@ -813,7 +813,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 memset(opt, 0, sizeof(struct ipv6_txoptions));
 opt->tot_len = sizeof(struct ipv6_txoptions);

-err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
+err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
 if (err < 0) {
 fl6_sock_release(flowlabel);
 return err;
@@ -1164,6 +1164,14 @@ static void rawv6_close(struct sock *sk, long timeout)
 sk_common_release(sk);
 }

+static int raw6_destroy(struct sock *sk)
+{
+lock_sock(sk);
+ip6_flush_pending_frames(sk);
+release_sock(sk);
+return 0;
+}
+
 static int rawv6_init_sk(struct sock *sk)
 {
 struct raw6_sock *rp = raw6_sk(sk);
@@ -1187,6 +1195,7 @@ struct proto rawv6_prot = {
 .name = "RAWv6",
 .owner = THIS_MODULE,
 .close = rawv6_close,
+.destroy = raw6_destroy,
 .connect = ip6_datagram_connect,
 .disconnect = udp_disconnect,
 .ioctl = rawv6_ioctl,

@@ -446,7 +446,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 	struct route_info *rinfo = (struct route_info *) opt;
 	struct in6_addr prefix_buf, *prefix;
 	unsigned int pref;
-	u32 lifetime;
+	unsigned long lifetime;
 	struct rt6_info *rt;
 
 	if (len < sizeof(struct route_info)) {
@@ -472,13 +472,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
 		pref = ICMPV6_ROUTER_PREF_MEDIUM;
 
-	lifetime = ntohl(rinfo->lifetime);
-	if (lifetime == 0xffffffff) {
-		/* infinity */
-	} else if (lifetime > 0x7fffffff/HZ - 1) {
-		/* Avoid arithmetic overflow */
-		lifetime = 0x7fffffff/HZ - 1;
-	}
+	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
 
 	if (rinfo->length == 3)
 		prefix = (struct in6_addr *)rinfo->prefix;
@@ -506,7 +500,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 			(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
 
 	if (rt) {
-		if (lifetime == 0xffffffff) {
+		if (!addrconf_finite_timeout(lifetime)) {
 			rt->rt6i_flags &= ~RTF_EXPIRES;
 		} else {
 			rt->rt6i_expires = jiffies + HZ * lifetime;

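addrconf_timeout_fixup() centralizes the clamp that the removed open-coded block performed: the all-ones lifetime means "infinite" and passes through (addrconf_finite_timeout() is, roughly, a check against that sentinel), while finite values are capped so that lifetime * HZ cannot overflow jiffies arithmetic. A self-contained approximation of the helper; the exact kernel bound may differ:

    #include <stdio.h>

    static unsigned long timeout_fixup(unsigned long timeout, unsigned long hz)
    {
        const unsigned long inf = 0xffffffffUL;
        if (timeout == inf)
            return timeout;                   /* infinite: never expires */
        if (timeout > 0x7fffffffUL / hz - 1)  /* keep timeout * hz in range */
            timeout = 0x7fffffffUL / hz - 1;
        return timeout;
    }

    int main(void)
    {
        printf("infinite: %lu\n", timeout_fixup(0xffffffffUL, 1000));
        printf("clamped:  %lu\n", timeout_fixup(0x10000000UL, 1000));
        return 0;
    }
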
@@ -109,7 +109,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
 {
 	struct xfrm6_tunnel *handler;
 
-	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto drop;
 
 	for (handler = tunnel46_handlers; handler; handler = handler->next)

@@ -534,7 +534,9 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
 {
 	struct udp_sock *up = udp_sk(sk);
 
-	if (up->pending) {
+	if (up->pending == AF_INET)
+		udp_flush_pending_frames(sk);
+	else if (up->pending) {
 		up->len = 0;
 		up->pending = 0;
 		ip6_flush_pending_frames(sk);
@@ -731,7 +733,7 @@ do_udp_sendmsg:
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
 
-		err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
+		err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -848,12 +850,14 @@ do_append_data:
 		} else {
 			dst_release(dst);
 		}
+		dst = NULL;
 	}
 
 	if (err > 0)
 		err = np->recverr ? net_xmit_errno(err) : 0;
 	release_sock(sk);
 out:
+	dst_release(dst);
 	fl6_sock_release(flowlabel);
 	if (!err)
 		return len;

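The two added lines in the last hunk close a reference-count hole: after the conditional path consumes dst, the pointer is NULLed so the shared out: label can drop the reference unconditionally. A toy refcount shows the shape; all names are illustrative, not the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct ref { int count; };

    static void ref_put(struct ref *r)
    {
        if (r && --r->count == 0)
            free(r);            /* put(NULL) is a harmless no-op */
    }

    static int sendmsg_like(int fastpath)
    {
        struct ref *dst = calloc(1, sizeof(*dst));
        int err = 0;
        dst->count = 1;

        if (fastpath) {
            ref_put(dst);       /* consumed on the fast path...            */
            dst = NULL;         /* ...so the exit path must not drop again */
        }
        /* out: */
        ref_put(dst);           /* safe either way */
        return err;
    }

    int main(void)
    {
        sendmsg_like(0);
        sendmsg_like(1);
        puts("no double free");
        return 0;
    }
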
@@ -1093,11 +1093,6 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
 
 	init_waitqueue_head(&self->query_wait);
 
-	/* Initialise networking socket struct */
-	sock_init_data(sock, sk);	/* Note : set sk->sk_refcnt to 1 */
-	sk->sk_family = PF_IRDA;
-	sk->sk_protocol = protocol;
-
 	switch (sock->type) {
 	case SOCK_STREAM:
 		sock->ops = &irda_stream_ops;
@@ -1124,13 +1119,20 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
 			self->max_sdu_size_rx = TTP_SAR_UNBOUND;
 			break;
 		default:
+			sk_free(sk);
 			return -ESOCKTNOSUPPORT;
 		}
 		break;
 	default:
+		sk_free(sk);
 		return -ESOCKTNOSUPPORT;
 	}
 
+	/* Initialise networking socket struct */
+	sock_init_data(sock, sk);	/* Note : set sk->sk_refcnt to 1 */
+	sk->sk_family = PF_IRDA;
+	sk->sk_protocol = protocol;
+
 	/* Register as a client with IrLMP */
 	self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
 	self->mask.word = 0xffff;

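Moving sock_init_data() below the switch means the socket is only wired up once every unsupported-type branch has been ruled out, and those branches now just sk_free() the bare allocation. The same allocate, validate, then initialize ordering in plain C; conn_create and its fields are hypothetical:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conn { int type; int initialized; };

    static int conn_create(struct conn **out, int type)
    {
        struct conn *c = calloc(1, sizeof(*c));
        if (!c)
            return -ENOMEM;

        if (type != 1 && type != 2) {   /* validate first...            */
            free(c);                    /* ...so cleanup is just free() */
            return -ESOCKTNOSUPPORT;
        }

        c->type = type;                 /* initialize after validation */
        c->initialized = 1;
        *out = c;
        return 0;
    }

    int main(void)
    {
        struct conn *c;
        printf("bad type -> %d\n", conn_create(&c, 9));
        if (conn_create(&c, 1) == 0) { puts("ok"); free(c); }
        return 0;
    }
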
@@ -73,7 +73,8 @@ connlimit_iphash6(const union nf_inet_addr *addr,
 static inline bool already_closed(const struct nf_conn *conn)
 {
 	if (nf_ct_protonum(conn) == IPPROTO_TCP)
-		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT;
+		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
+		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
 	else
 		return 0;
 }

@@ -400,13 +400,13 @@ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
  * @attrlen: length of attribute payload
  * @data: head of attribute payload
  *
- * Returns -1 if the tailroom of the skb is insufficient to store
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
  * the attribute header and payload.
  */
 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
 {
 	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
-		return -1;
+		return -EMSGSIZE;
 
 	__nla_put(skb, attrtype, attrlen, data);
 	return 0;
@@ -418,13 +418,13 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
  * @attrlen: length of attribute payload
  * @data: head of attribute payload
  *
- * Returns -1 if the tailroom of the skb is insufficient to store
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
  * the attribute payload.
  */
 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
 {
 	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
-		return -1;
+		return -EMSGSIZE;
 
 	__nla_put_nohdr(skb, attrlen, data);
 	return 0;
@@ -436,13 +436,13 @@ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
  * @attrlen: length of attribute payload
  * @data: head of attribute payload
  *
- * Returns -1 if the tailroom of the skb is insufficient to store
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
  * the attribute payload.
  */
 int nla_append(struct sk_buff *skb, int attrlen, const void *data)
 {
 	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
-		return -1;
+		return -EMSGSIZE;
 
 	memcpy(skb_put(skb, attrlen), data, attrlen);
 	return 0;

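Returning -EMSGSIZE instead of a bare -1 lets callers propagate the value straight to userspace as a real errno. A userspace TLV writer in the same spirit; the layout is simplified to one-byte type and length fields, and tlv_put is not a kernel API:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct buf { unsigned char data[32]; size_t used; };

    static int tlv_put(struct buf *b, int type, size_t len, const void *payload)
    {
        size_t need = 2 + len;          /* 1-byte type, 1-byte len, payload */
        if (sizeof(b->data) - b->used < need)
            return -EMSGSIZE;           /* was: return -1 */
        b->data[b->used++] = (unsigned char)type;
        b->data[b->used++] = (unsigned char)len;
        memcpy(b->data + b->used, payload, len);
        b->used += len;
        return 0;
    }

    int main(void)
    {
        struct buf b = { .used = 0 };
        while (tlv_put(&b, 1, 10, "0123456789") == 0)
            ;                           /* fill until the buffer is full */
        printf("last put -> -EMSGSIZE (%d)\n", -EMSGSIZE);
        return 0;
    }
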
@@ -554,7 +554,8 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(skb, hdr);
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
 }
 
 static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
@@ -590,7 +591,8 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(skb, hdr);
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
 }
 
 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)

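The old `return genlmsg_cancel(skb, hdr);` handed back whatever the cancel helper returned, historically a bare -1, which userspace would misread as EPERM; the fix rolls back first and then reports -EMSGSIZE explicitly. The same pattern repeats in the qdisc dump hunks below. Its shape in miniature; fill_info_old/new and cancel are stand-ins, not the genetlink API:

    #include <errno.h>
    #include <stdio.h>

    static int cancel(void) { return -1; }  /* old-style rollback result */

    static int fill_info_old(int fits)
    {
        if (!fits)
            return cancel();    /* BUG: -1 surfaces as -EPERM */
        return 0;
    }

    static int fill_info_new(int fits)
    {
        if (!fits) {
            cancel();           /* roll back the partial message... */
            return -EMSGSIZE;   /* ...and report a real error code  */
        }
        return 0;
    }

    int main(void)
    {
        printf("old: %d  new: %d\n", fill_info_old(0), fill_info_new(0));
        return 0;
    }
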
@@ -444,7 +444,8 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -466,7 +467,8 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static const struct Qdisc_class_ops dsmark_class_ops = {

@@ -582,7 +582,8 @@ append_opt:
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static void gred_destroy(struct Qdisc *sch)

@@ -1360,7 +1360,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
 
 nla_put_failure:
 	nla_nest_cancel(skb, nest);
-	return -1;
+	return -EMSGSIZE;
 }
 
 static int

@@ -281,7 +281,8 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)

@@ -1203,6 +1203,9 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 	struct list_head *head = &asoc->peer.transport_addr_list;
 	struct list_head *pos;
 
+	if (asoc->peer.transport_count == 1)
+		return;
+
 	/* Find the next transport in a round-robin fashion. */
 	t = asoc->peer.retran_path;
 	pos = &t->transports;
@@ -1217,6 +1220,15 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 
 		t = list_entry(pos, struct sctp_transport, transports);
 
+		/* We have exhausted the list, but didn't find any
+		 * other active transports.  If so, use the next
+		 * transport.
+		 */
+		if (t == asoc->peer.retran_path) {
+			t = next;
+			break;
+		}
+
 		/* Try to find an active transport. */
 
 		if ((t->state == SCTP_ACTIVE) ||
@@ -1229,15 +1241,6 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 			if (!next)
 				next = t;
 		}
-
-		/* We have exhausted the list, but didn't find any
-		 * other active transports.  If so, use the next
-		 * transport.
-		 */
-		if (t == asoc->peer.retran_path) {
-			t = next;
-			break;
-		}
 	}
 
 	asoc->peer.retran_path = t;

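The rearranged loop picks the next ACTIVE transport after the current retran_path and, only once the walk has wrapped back to the starting point, settles for the first non-active candidate; the new early return skips all of it for single-homed peers. A compact userspace rendition over an array treated as a ring; field names are illustrative:

    #include <stdio.h>

    struct transport { const char *name; int active; };

    static int next_retran_path(struct transport *t, int n, int cur)
    {
        int next = -1, i, idx;

        if (n == 1)                 /* single path: nothing to rotate to */
            return cur;

        for (i = 1; i <= n; i++) {
            idx = (cur + i) % n;
            if (idx == cur)         /* wrapped: settle for a fallback */
                return next >= 0 ? next : cur;
            if (t[idx].active)
                return idx;
            if (next < 0)
                next = idx;         /* remember the next transport */
        }
        return cur;
    }

    int main(void)
    {
        struct transport t[] = { {"A", 0}, {"B", 0}, {"C", 1} };
        printf("from A -> %s\n", t[next_retran_path(t, 3, 0)].name);
        t[2].active = 0;
        printf("from A -> %s\n", t[next_retran_path(t, 3, 0)].name);
        return 0;
    }
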
@@ -299,7 +299,8 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
 /* Fills in the source address(saddr) based on the destination address(daddr)
  * and asoc's bind address list.
  */
-static void sctp_v6_get_saddr(struct sctp_association *asoc,
+static void sctp_v6_get_saddr(struct sctp_sock *sk,
+			      struct sctp_association *asoc,
 			      struct dst_entry *dst,
 			      union sctp_addr *daddr,
 			      union sctp_addr *saddr)
@@ -318,7 +319,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
 	if (!asoc) {
 		ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
 				   &daddr->v6.sin6_addr,
-				   inet6_sk(asoc->base.sk)->srcprefs,
+				   inet6_sk(&sk->inet.sk)->srcprefs,
 				   &saddr->v6.sin6_addr);
 		SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
 				  NIP6(saddr->v6.sin6_addr));
@@ -726,6 +727,11 @@ static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 	seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr));
 }
 
+static void sctp_v6_ecn_capable(struct sock *sk)
+{
+	inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
 /* Initialize a PF_INET6 socket msg_name. */
 static void sctp_inet6_msgname(char *msgname, int *addr_len)
 {
@@ -996,6 +1002,7 @@ static struct sctp_af sctp_af_inet6 = {
 	.skb_iif	   = sctp_v6_skb_iif,
 	.is_ce		   = sctp_v6_is_ce,
 	.seq_dump_addr	   = sctp_v6_seq_dump_addr,
+	.ecn_capable	   = sctp_v6_ecn_capable,
 	.net_header_len	   = sizeof(struct ipv6hdr),
 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
 #ifdef CONFIG_COMPAT

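sctp_v6_ecn_capable() sets ECT(0) in the IPv6 traffic class, and registering it in sctp_af lets the transmit path mark ECN through an address-family hook instead of hardcoding the IPv4 helper (that call-site switch is the output.c hunk below). The dispatch shape, reduced to standalone C; types and values are illustrative, with 0x02 standing in for the ECT(0) codepoint bit:

    #include <stdio.h>

    struct fake_sock { unsigned char tos; unsigned char tclass; };

    static void v4_ecn_capable(struct fake_sock *sk) { sk->tos    |= 0x02; }
    static void v6_ecn_capable(struct fake_sock *sk) { sk->tclass |= 0x02; }

    struct af_ops { void (*ecn_capable)(struct fake_sock *sk); };

    static const struct af_ops af_inet  = { v4_ecn_capable };
    static const struct af_ops af_inet6 = { v6_ecn_capable };

    int main(void)
    {
        struct fake_sock sk = { 0, 0 };
        const struct af_ops *af = &af_inet6;  /* chosen per transport */

        af->ecn_capable(&sk);   /* ~ (*tp->af_specific->ecn_capable)(sk) */
        printf("tclass=%#x tos=%#x\n", sk.tclass, sk.tos);
        return 0;
    }
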
@@ -548,7 +548,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * Note: The works for IPv6 layer checks this bit too later
 	 * in transmission.  See IP6_ECN_flow_xmit().
 	 */
-	INET_ECN_xmit(nskb->sk);
+	(*tp->af_specific->ecn_capable)(nskb->sk);
 
 	/* Set up the IP options. */
 	/* BUG: not implemented

@@ -208,6 +208,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
 
+	q->fast_rtx = 0;
 	q->outstanding_bytes = 0;
 	q->empty = 1;
 	q->cork  = 0;
@@ -500,6 +501,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	case SCTP_RTXR_FAST_RTX:
 		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
+		q->fast_rtx = 1;
 		break;
 	case SCTP_RTXR_PMTUD:
 		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
@@ -518,9 +520,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
 	 * following the procedures outlined in C1 - C5.
 	 */
-	sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+	if (reason == SCTP_RTXR_T3_RTX)
+		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
 
-	error = sctp_outq_flush(q, /* rtx_timeout */ 1);
+	/* Flush the queues only on timeout, since fast_rtx is only
+	 * triggered during sack processing and the queue
+	 * will be flushed at the end.
+	 */
+	if (reason != SCTP_RTXR_FAST_RTX)
+		error = sctp_outq_flush(q, /* rtx_timeout */ 1);
 
 	if (error)
 		q->asoc->base.sk->sk_err = -error;
@@ -538,17 +546,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			       int rtx_timeout, int *start_timer)
 {
 	struct list_head *lqueue;
-	struct list_head *lchunk;
 	struct sctp_transport *transport = pkt->transport;
 	sctp_xmit_t status;
 	struct sctp_chunk *chunk, *chunk1;
 	struct sctp_association *asoc;
+	int fast_rtx;
 	int error = 0;
+	int timer = 0;
+	int done = 0;
 
 	asoc = q->asoc;
 	lqueue = &q->retransmit;
+	fast_rtx = q->fast_rtx;
 
-	/* RFC 2960 6.3.3 Handle T3-rtx Expiration
+	/* This loop handles time-out retransmissions, fast retransmissions,
+	 * and retransmissions due to opening of window.
+	 *
+	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
 	 *
 	 * E3) Determine how many of the earliest (i.e., lowest TSN)
 	 *     outstanding DATA chunks for the address for which the
@@ -563,12 +577,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 	 * [Just to be painfully clear, if we are retransmitting
 	 * because a timeout just happened, we should send only ONE
 	 * packet of retransmitted data.]
+	 *
+	 * For fast retransmissions we also send only ONE packet.  However,
+	 * if we are just flushing the queue due to open window, we'll
+	 * try to send as much as possible.
 	 */
-	lchunk = sctp_list_dequeue(lqueue);
-
-	while (lchunk) {
-		chunk = list_entry(lchunk, struct sctp_chunk,
-				   transmitted_list);
+	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
 
 		/* Make sure that Gap Acked TSNs are not retransmitted.  A
 		 * simple approach is just to move such TSNs out of the
@@ -576,58 +590,60 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 		 * next chunk.
 		 */
 		if (chunk->tsn_gap_acked) {
-			list_add_tail(lchunk, &transport->transmitted);
-			lchunk = sctp_list_dequeue(lqueue);
+			list_del(&chunk->transmitted_list);
+			list_add_tail(&chunk->transmitted_list,
+				      &transport->transmitted);
 			continue;
 		}
 
+		/* If we are doing fast retransmit, ignore non-fast_rtransmit
+		 * chunks
+		 */
+		if (fast_rtx && !chunk->fast_retransmit)
+			continue;
+
 		/* Attempt to append this chunk to the packet. */
 		status = sctp_packet_append_chunk(pkt, chunk);
 
 		switch (status) {
 		case SCTP_XMIT_PMTU_FULL:
 			/* Send this packet.  */
-			if ((error = sctp_packet_transmit(pkt)) == 0)
-				*start_timer = 1;
+			error = sctp_packet_transmit(pkt);
 
 			/* If we are retransmitting, we should only
 			 * send a single packet.
 			 */
-			if (rtx_timeout) {
-				list_add(lchunk, lqueue);
-				lchunk = NULL;
-			}
+			if (rtx_timeout || fast_rtx)
+				done = 1;
 
-			/* Bundle lchunk in the next round.  */
+			/* Bundle next chunk in the next round.  */
 			break;
 
 		case SCTP_XMIT_RWND_FULL:
 			/* Send this packet. */
-			if ((error = sctp_packet_transmit(pkt)) == 0)
-				*start_timer = 1;
+			error = sctp_packet_transmit(pkt);
 
 			/* Stop sending DATA as there is no more room
 			 * at the receiver.
 			 */
-			list_add(lchunk, lqueue);
-			lchunk = NULL;
+			done = 1;
 			break;
 
 		case SCTP_XMIT_NAGLE_DELAY:
 			/* Send this packet. */
-			if ((error = sctp_packet_transmit(pkt)) == 0)
-				*start_timer = 1;
+			error = sctp_packet_transmit(pkt);
 
 			/* Stop sending DATA because of nagle delay. */
-			list_add(lchunk, lqueue);
-			lchunk = NULL;
+			done = 1;
 			break;
 
 		default:
 			/* The append was successful, so add this chunk to
 			 * the transmitted list.
 			 */
-			list_add_tail(lchunk, &transport->transmitted);
+			list_del(&chunk->transmitted_list);
+			list_add_tail(&chunk->transmitted_list,
+				      &transport->transmitted);
 
 			/* Mark the chunk as ineligible for fast retransmit
 			 * after it is retransmitted.
@@ -635,27 +651,44 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			if (chunk->fast_retransmit > 0)
 				chunk->fast_retransmit = -1;
 
-			*start_timer = 1;
-			q->empty = 0;
+			/* Force start T3-rtx timer when fast retransmitting
+			 * the earliest outstanding TSN
+			 */
+			if (!timer && fast_rtx &&
+			    ntohl(chunk->subh.data_hdr->tsn) ==
+					     asoc->ctsn_ack_point + 1)
+				timer = 2;
 
-			/* Retrieve a new chunk to bundle. */
-			lchunk = sctp_list_dequeue(lqueue);
+			q->empty = 0;
 			break;
 		}
 
-	/* If we are here due to a retransmit timeout or a fast
-	 * retransmit and if there are any chunks left in the retransmit
-	 * queue that could not fit in the PMTU sized packet, they need
-	 * to be marked as ineligible for a subsequent fast retransmit.
-	 */
-	if (rtx_timeout && !lchunk) {
-		list_for_each_entry(chunk1, lqueue, transmitted_list) {
-			if (chunk1->fast_retransmit > 0)
-				chunk1->fast_retransmit = -1;
-		}
+		/* Set the timer if there were no errors */
+		if (!error && !timer)
+			timer = 1;
+
+		if (done)
+			break;
+	}
+
+	/* If we are here due to a retransmit timeout or a fast
+	 * retransmit and if there are any chunks left in the retransmit
+	 * queue that could not fit in the PMTU sized packet, they need
+	 * to be marked as ineligible for a subsequent fast retransmit.
+	 */
+	if (rtx_timeout || fast_rtx) {
+		list_for_each_entry(chunk1, lqueue, transmitted_list) {
+			if (chunk1->fast_retransmit > 0)
+				chunk1->fast_retransmit = -1;
+		}
 	}
 
+	*start_timer = timer;
+
+	/* Clear fast retransmit hint */
+	if (fast_rtx)
+		q->fast_rtx = 0;
+
 	return error;
 }
@@ -862,7 +895,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 						      rtx_timeout, &start_timer);
 
 			if (start_timer)
-				sctp_transport_reset_timers(transport);
+				sctp_transport_reset_timers(transport,
+							    start_timer-1);
 
 			/* This can happen on COOKIE-ECHO resend.  Only
 			 * one chunk can get bundled with a COOKIE-ECHO.
@@ -977,7 +1011,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 			list_add_tail(&chunk->transmitted_list,
 				      &transport->transmitted);
 
-			sctp_transport_reset_timers(transport);
+			sctp_transport_reset_timers(transport, start_timer-1);
 
 			q->empty = 0;

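Two behaviors are carried by this outqueue rework: a fast retransmit now sends at most one packet (the new done flag) and considers only chunks marked for fast retransmit, and the T3-RTX timer is force-restarted (timer = 2) only when the retransmitted chunk is the earliest outstanding TSN. A skeleton of that control flow, with the chunk list reduced to an array and all names illustrative:

    #include <stdio.h>

    struct chunk { unsigned tsn; int fast_retransmit; };

    /* Returns the timer action: 1 start-if-idle, 2 force-restart. */
    static int flush_rtx(struct chunk *c, int n, unsigned ctsn_ack_point,
                         int fast_rtx, int per_packet)
    {
        int timer = 0, in_packet = 0, i;

        for (i = 0; i < n; i++) {
            if (fast_rtx && !c[i].fast_retransmit)
                continue;                /* fast rtx ignores other chunks */
            if (in_packet == per_packet) /* packet full: transmit it...   */
                break;                   /* ...and send only ONE packet   */
            in_packet++;
            if (!timer && fast_rtx && c[i].tsn == ctsn_ack_point + 1)
                timer = 2;               /* lowest outstanding TSN:
                                          * force the T3-rtx restart */
        }
        return timer ? timer : 1;        /* otherwise start timer if idle */
    }

    int main(void)
    {
        struct chunk c[] = { {101, 1}, {102, 0}, {103, 1} };
        printf("timer=%d\n", flush_rtx(c, 3, 100, 1, 2));
        return 0;
    }
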
@@ -470,11 +470,11 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
 	/* Walk through the bind address list and look for a bind
 	 * address that matches the source address of the returned dst.
 	 */
+	sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
 	rcu_read_lock();
 	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
 		if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
 			continue;
-		sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
 		if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
 			goto out_unlock;
 	}
@@ -519,7 +519,8 @@ out:
 /* For v4, the source address is cached in the route entry(dst).  So no need
  * to cache it separately and hence this is an empty routine.
  */
-static void sctp_v4_get_saddr(struct sctp_association *asoc,
+static void sctp_v4_get_saddr(struct sctp_sock *sk,
+			      struct sctp_association *asoc,
 			      struct dst_entry *dst,
 			      union sctp_addr *daddr,
 			      union sctp_addr *saddr)
@@ -616,6 +617,11 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 	seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
 }
 
+static void sctp_v4_ecn_capable(struct sock *sk)
+{
+	INET_ECN_xmit(sk);
+}
+
 /* Event handler for inet address addition/deletion events.
  * The sctp_local_addr_list needs to be protocted by a spin lock since
  * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -934,6 +940,7 @@ static struct sctp_af sctp_af_inet = {
 	.skb_iif	   = sctp_v4_skb_iif,
 	.is_ce		   = sctp_v4_is_ce,
 	.seq_dump_addr	   = sctp_v4_seq_dump_addr,
+	.ecn_capable	   = sctp_v4_ecn_capable,
 	.net_header_len	   = sizeof(struct iphdr),
 	.sockaddr_len	   = sizeof(struct sockaddr_in),
 #ifdef CONFIG_COMPAT

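Hoisting sctp_v4_dst_saddr() out of the bind-address walk computes the route's source address once instead of once per list node; nothing inside the loop can change it. The general shape of the change, with dst_saddr_of standing in for the real helper:

    #include <stdio.h>

    struct addr { unsigned v; };

    static struct addr dst_saddr_of(unsigned route) /* pretend it's costly */
    {
        struct addr a = { route ^ 0x5a5a5a5a };
        return a;
    }

    int main(void)
    {
        unsigned binds[] = { 0x01, 0x5a5a5a5b, 0x03 };
        struct addr dst_saddr = dst_saddr_of(1);    /* hoisted: once */
        int i, match = -1;

        for (i = 0; i < 3; i++)
            if (binds[i] == dst_saddr.v) { match = i; break; }

        printf("match=%d\n", match);
        return 0;
    }
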
@@ -79,6 +79,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 	peer->rttvar = 0;
 	peer->srtt = 0;
 	peer->rto_pending = 0;
+	peer->fast_recovery = 0;
 
 	peer->last_time_heard = jiffies;
 	peer->last_time_used = jiffies;
@@ -190,7 +191,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
 {
 	/* RFC 2960 6.3.2 Retransmission Timer Rules
 	 *
@@ -200,7 +201,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
 	 * address.
 	 */
 
-	if (!timer_pending(&transport->T3_rtx_timer))
+	if (force || !timer_pending(&transport->T3_rtx_timer))
 		if (!mod_timer(&transport->T3_rtx_timer,
 			       jiffies + transport->rto))
 			sctp_transport_hold(transport);
@@ -291,7 +292,7 @@ void sctp_transport_route(struct sctp_transport *transport,
 	if (saddr)
 		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
 	else
-		af->get_saddr(asoc, dst, daddr, &transport->saddr);
+		af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);
 
 	transport->dst = dst;
 	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
@@ -403,11 +404,16 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
 	cwnd = transport->cwnd;
 	flight_size = transport->flight_size;
 
+	/* See if we need to exit Fast Recovery first */
+	if (transport->fast_recovery &&
+	    TSN_lte(transport->fast_recovery_exit, sack_ctsn))
+		transport->fast_recovery = 0;
+
 	/* The appropriate cwnd increase algorithm is performed if, and only
-	 * if the cumulative TSN has advanced and the congestion window is
+	 * if the cumulative TSN would advance and the congestion window is
 	 * being fully utilized.
 	 */
-	if ((transport->asoc->ctsn_ack_point >= sack_ctsn) ||
+	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
 	    (flight_size < cwnd))
 		return;
 
@@ -416,17 +422,23 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
 	pmtu = transport->asoc->pathmtu;
 
 	if (cwnd <= ssthresh) {
-		/* RFC 2960 7.2.1, sctpimpguide-05 2.14.2 When cwnd is less
-		 * than or equal to ssthresh an SCTP endpoint MUST use the
-		 * slow start algorithm to increase cwnd only if the current
-		 * congestion window is being fully utilized and an incoming
-		 * SACK advances the Cumulative TSN Ack Point. Only when these
-		 * two conditions are met can the cwnd be increased otherwise
-		 * the cwnd MUST not be increased. If these conditions are met
-		 * then cwnd MUST be increased by at most the lesser of
-		 * 1) the total size of the previously outstanding DATA
-		 * chunk(s) acknowledged, and 2) the destination's path MTU.
+		/* RFC 4960 7.2.1
+		 * o  When cwnd is less than or equal to ssthresh, an SCTP
+		 *    endpoint MUST use the slow-start algorithm to increase
+		 *    cwnd only if the current congestion window is being fully
+		 *    utilized, an incoming SACK advances the Cumulative TSN
+		 *    Ack Point, and the data sender is not in Fast Recovery.
+		 *    Only when these three conditions are met can the cwnd be
+		 *    increased; otherwise, the cwnd MUST not be increased.
+		 *    If these conditions are met, then cwnd MUST be increased
+		 *    by, at most, the lesser of 1) the total size of the
+		 *    previously outstanding DATA chunk(s) acknowledged, and
+		 *    2) the destination's path MTU.  This upper bound protects
+		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
 		 */
+		if (transport->fast_recovery)
+			return;
+
 		if (bytes_acked > pmtu)
 			cwnd += pmtu;
 		else
@@ -502,6 +514,13 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
 		 *      cwnd = ssthresh
 		 *      partial_bytes_acked = 0
 		 */
+		if (transport->fast_recovery)
+			return;
+
+		/* Mark Fast recovery */
+		transport->fast_recovery = 1;
+		transport->fast_recovery_exit = transport->asoc->next_tsn - 1;
+
 		transport->ssthresh = max(transport->cwnd/2,
 					  4*transport->asoc->pathmtu);
 		transport->cwnd = transport->ssthresh;
@@ -586,6 +605,7 @@ void sctp_transport_reset(struct sctp_transport *t)
 	t->flight_size = 0;
 	t->error_count = 0;
 	t->rto_pending = 0;
+	t->fast_recovery = 0;
 
 	/* Initialize the state information for SFR-CACC */
 	t->cacc.changeover_active = 0;

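The fast_recovery/fast_recovery_exit pair implements the RFC 4960 rules quoted above: entering fast retransmit records the highest TSN sent so far, cwnd is halved at most once per recovery episode, and no cwnd growth happens until a SACK advances past the recorded exit point. A condensed sketch; serial-number wraparound is ignored here, whereas the real code compares TSNs with TSN_lte(), and the full slow-start conditions (flight size, etc.) are elided:

    #include <stdio.h>

    struct transport {
        unsigned cwnd, ssthresh, pathmtu;
        int fast_recovery;
        unsigned fast_recovery_exit;
    };

    static void lower_cwnd_fast_rtx(struct transport *t, unsigned next_tsn)
    {
        if (t->fast_recovery)
            return;                      /* one halving per episode */
        t->fast_recovery = 1;
        t->fast_recovery_exit = next_tsn - 1;
        t->ssthresh = t->cwnd / 2 > 4 * t->pathmtu ? t->cwnd / 2
                                                   : 4 * t->pathmtu;
        t->cwnd = t->ssthresh;
    }

    static void raise_cwnd(struct transport *t, unsigned sack_ctsn,
                           unsigned acked)
    {
        if (t->fast_recovery && t->fast_recovery_exit <= sack_ctsn)
            t->fast_recovery = 0;        /* exit point reached */
        if (t->fast_recovery)
            return;                      /* no growth during recovery */
        t->cwnd += acked < t->pathmtu ? acked : t->pathmtu;
    }

    int main(void)
    {
        struct transport t = { 20000, 10000, 1500, 0, 0 };
        lower_cwnd_fast_rtx(&t, 1000);   /* enter recovery at TSN 999   */
        lower_cwnd_fast_rtx(&t, 1005);   /* ignored: already recovering */
        raise_cwnd(&t, 500, 3000);       /* below exit: no growth       */
        raise_cwnd(&t, 999, 3000);       /* exit, then grow by one MTU  */
        printf("cwnd=%u\n", t.cwnd);
        return 0;
    }
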
@@ -187,7 +187,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
@@ -273,7 +274,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
@@ -928,7 +930,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_station(struct sk_buff *skb,
@@ -1267,7 +1270,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_mpath(struct sk_buff *skb,

@@ -200,8 +200,8 @@ static struct xfrm_algo_desc aalg_list[] = {
 	}
 },
 {
-	.name = "hmac(ripemd160)",
-	.compat = "ripemd160",
+	.name = "hmac(rmd160)",
+	.compat = "rmd160",
 
 	.uinfo = {
 		.auth = {