Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) NLA_PUT* --> nla_put_* conversion got one case wrong in
    nfnetlink_log, fix from Patrick McHardy.

 2) Missed error return check in ipw2100 driver, from Julia Lawall.

 3) PMTU updates in ipv4 were setting the expiry time incorrectly, fix
    from Eric Dumazet.

 4) SFC driver erroneously reversed src and dst when reporting filters
    via ethtool.

 5) Memory leak in CAN protocol and wrong setting of IRQF_SHARED in
    sja1000 can platform driver, from Alexey Khoroshilov and Sven
    Schmitt.

 6) Fix multicast traffic scaling regression in ipv4_dst_destroy, only
    take the lock when we really need to.  From Eric Dumazet.

 7) Fix non-root process spoofing in netlink, from Pablo Neira Ayuso.

 8) CWND reduction in TCP is done incorrectly during non-SACK recovery,
    fix from Yuchung Cheng.

 9) Revert netpoll change, and fix what was actually a driver specific
    problem.  From Amerigo Wang.  This should cure bootup hangs with
    netconsole some people reported.

10) Fix xen-netfront invoking __skb_fill_page_desc() with a NULL page
    pointer.  From Ian Campbell.

11) SIP NAT fix for expectation creation, from Pablo Neira Ayuso.

12) __ip_rt_update_pmtu() needs RCU locking, from Eric Dumazet.

13) Fix usbnet deadlock on resume, can't use GFP_KERNEL in this
    situation.  From Oliver Neukum.

14) The davinci ethernet driver triggers an OOPS on removal because it
    frees an MDIO object before unregistering it.  Fix from Bin Liu.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  net: qmi_wwan: add several new Gobi devices
  fddi: 64 bit bug in smt_add_para()
  net: ethernet: fix kernel OOPS when remove davinci_mdio module
  net/xfrm/xfrm_state.c: fix error return code
  net: ipv6: fix error return code
  net: qmi_wwan: new device: Foxconn/Novatel E396
  usbnet: fix deadlock in resume
  cs89x0 : packet reception not working
  netfilter: nf_conntrack: fix racy timer handling with reliable events
  bnx2x: Correct the ndo_poll_controller call
  bnx2x: Move netif_napi_add to the open call
  ipv4: must use rcu protection while calling fib_lookup
  bnx2x: fix 57840_MF pci id
  net: ipv4: ipmr_expire_timer causes crash when removing net namespace
  e1000e: DoS while TSO enabled caused by link partner with small MSS
  l2tp: avoid to use synchronize_rcu in tunnel free function
  gianfar: fix default tx vlan offload feature flag
  netfilter: nf_nat_sip: fix incorrect handling of EBUSY for RTCP expectation
  xen-netfront: use __pskb_pull_tail to ensure linear area is big enough on RX
  netfilter: nfnetlink_log: fix error return code in init path
  ...
commit 0b1a34c992
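Of the fixes pulled here, the softing firmware-loader change in the diff below is the classic realloc-leak pattern: assigning the result of krealloc() directly to the only pointer that tracks the buffer loses that buffer forever when the allocation fails. What follows is a minimal user-space sketch of the same pattern, with plain realloc() standing in for the kernel's krealloc(); grow_buffer and its parameters are illustrative names, not code from the driver:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow *buf to at least want bytes, rounded up to a 1 KiB multiple
 * (the same alignment the driver uses).  Returns 0 on success and -1
 * on failure; on failure *buf is left intact and can still be freed. */
static int grow_buffer(unsigned char **buf, size_t *buflen, size_t want)
{
	size_t newlen = (want + 1023) & ~(size_t)1023;
	unsigned char *new_buf;

	if (newlen <= *buflen)
		return 0;
	new_buf = realloc(*buf, newlen);	/* never assign straight to *buf */
	if (!new_buf)
		return -1;			/* old *buf is still valid here */
	*buf = new_buf;
	*buflen = newlen;
	return 0;
}

int main(void)
{
	unsigned char *buf = NULL;
	size_t buflen = 0;

	if (grow_buffer(&buf, &buflen, 100) == 0)
		memset(buf, 0, 100);
	printf("buffer is %zu bytes\n", buflen);
	free(buf);
	return 0;
}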
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
 	priv = netdev_priv(dev);
 
 	dev->irq = res_irq->start;
-	priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+	priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+	if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+		priv->irq_flags |= IRQF_SHARED;
 	priv->reg_base = addr;
 	/* The CAN clock frequency is half the oscillator clock frequency */
 	priv->can.clock.freq = pdata->osc_freq / 2;
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
 	const uint8_t *mem, *end, *dat;
 	uint16_t type, len;
 	uint32_t addr;
-	uint8_t *buf = NULL;
+	uint8_t *buf = NULL, *new_buf;
 	int buflen = 0;
 	int8_t type_end = 0;
 
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
 		if (len > buflen) {
 			/* align buflen */
 			buflen = (len + (1024-1)) & ~(1024-1);
-			buf = krealloc(buf, buflen, GFP_KERNEL);
-			if (!buf) {
+			new_buf = krealloc(buf, buflen, GFP_KERNEL);
+			if (!new_buf) {
 				ret = -ENOMEM;
 				goto failed;
 			}
+			buf = new_buf;
 		}
 		/* verify record data */
 		memcpy_fromio(buf, &dpram[addr + offset], len);
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
 			continue;		\
 		else
 
-#define for_each_napi_rx_queue(bp, var) \
-	for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	 */
 	bnx2x_setup_tc(bp->dev, bp->max_cos);
 
+	/* Add all NAPI objects */
+	bnx2x_add_all_napi(bp);
 	bnx2x_napi_enable(bp);
 
 	/* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 	/* Disable HW interrupts, NAPI */
 	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 	bp->num_napi_queues = bp->num_queues;
 
 	/* Add NAPI objects */
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
  */
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
-	bnx2x_del_all_napi(bp);
 	bnx2x_disable_msi(bp);
 	BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
 	bnx2x_set_int_mode(bp);
-	bnx2x_add_all_napi(bp);
 }
 
 /**
@@ -8427,6 +8427,8 @@ unload_error:
 
 	/* Disable HW interrupts, NAPI */
 	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
+	int i;
 
-	disable_irq(bp->pdev->irq);
-	bnx2x_interrupt(bp->pdev->irq, dev);
-	enable_irq(bp->pdev->irq);
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+	}
 }
 #endif
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	 */
 	bnx2x_set_int_mode(bp);
 
-	/* Add all NAPI objects */
-	bnx2x_add_all_napi(bp);
-
 	rc = register_netdev(dev);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(dev);
 
-	/* Delete all NAPI objects */
-	bnx2x_del_all_napi(bp);
-
 	/* Power on: we can't let PCI layer write to us while we are in D3 */
 	bnx2x_set_power_state(bp, PCI_D0);
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	bnx2x_tx_disable(bp);
 
 	bnx2x_netif_stop(bp, 0);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 	del_timer_sync(&bp->timer);
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
+	u16 cfg;
 
 	spin_lock_irqsave(&lp->lock, flags);
 	if (dev->flags & IFF_PROMISC)
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
 	/* in promiscuous mode, we accept errored packets,
 	 * so we have to enable interrupts on them also
 	 */
-	writereg(dev, PP_RxCFG,
-		 (lp->curr_rx_cfg |
-		  (lp->rx_mode == RX_ALL_ACCEPT)
-		  ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
-		  : 0));
+	cfg = lp->curr_rx_cfg;
+	if (lp->rx_mode == RX_ALL_ACCEPT)
+		cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
+	writereg(dev, PP_RxCFG, cfg);
 	spin_unlock_irqrestore(&lp->lock, flags);
 }
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	int num = 0, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-	spin_lock_bh(&adapter->mcc_cq_lock);
+	spin_lock(&adapter->mcc_cq_lock);
 	while ((compl = be_mcc_compl_get(adapter))) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
 			/* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	if (num)
 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-	spin_unlock_bh(&adapter->mcc_cq_lock);
+	spin_unlock(&adapter->mcc_cq_lock);
 	return status;
 }
 
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
 		if (be_error(adapter))
 			return -EIO;
 
+		local_bh_disable();
 		status = be_process_mcc(adapter);
+		local_bh_enable();
 
 		if (atomic_read(&mcc_obj->q.used) == 0)
 			break;
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
 	/* when interrupts are not yet enabled, just reap any pending
 	 * mcc completions */
 	if (!netif_running(adapter->netdev)) {
+		local_bh_disable();
 		be_process_mcc(adapter);
+		local_bh_enable();
 		goto reschedule;
 	}
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
 		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		dev->features |= NETIF_F_HW_VLAN_RX;
 	}
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
@@ -310,6 +310,7 @@ struct e1000_adapter {
 	 */
 	struct e1000_ring *tx_ring /* One per active queue */
 						____cacheline_aligned_in_smp;
+	u32 tx_fifo_limit;
 
 	struct napi_struct napi;
 
@@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		break;
 	}
 
+	/*
+	 * Alignment of Tx data is on an arbitrary byte boundary with the
+	 * maximum size per Tx descriptor limited only to the transmit
+	 * allocation of the packet buffer minus 96 bytes with an upper
+	 * limit of 24KB due to receive synchronization limitations.
+	 */
+	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+				       24 << 10);
+
 	/*
 	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
 	 * fit in receive buffer.
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
 		return 1;
 }
 
-#define E1000_MAX_PER_TXD	8192
-#define E1000_MAX_TXD_PWR	12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
 			unsigned int first, unsigned int max_per_txd,
-			unsigned int nr_frags, unsigned int mss)
+			unsigned int nr_frags)
 {
 	struct e1000_adapter *adapter = tx_ring->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+	BUG_ON(size > tx_ring->count);
+
 	if (e1000_desc_unused(tx_ring) >= size)
 		return 0;
 	return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int first;
-	unsigned int max_per_txd = E1000_MAX_PER_TXD;
-	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	}
 
 	mss = skb_shinfo(skb)->gso_size;
-	/*
-	 * The controller does a simple calculation to
-	 * make sure there is enough room in the FIFO before
-	 * initiating the DMA for each buffer.  The calc is:
-	 * 4 = ceil(buffer len/mss).  To make sure we don't
-	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops.
-	 */
 	if (mss) {
 		u8 hdr_len;
-		max_per_txd = min(mss << 2, max_per_txd);
-		max_txd_pwr = fls(max_per_txd) - 1;
 
 		/*
 		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		count++;
 	count++;
 
-	count += TXD_USE_COUNT(len, max_txd_pwr);
+	count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-				       max_txd_pwr);
+		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+				      adapter->tx_fifo_limit);
 
 	if (adapter->hw.mac.tx_pkt_filtering)
 		e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
 	/* if count is 0 then mapping error has occurred */
-	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+			     nr_frags);
 	if (count) {
 		skb_tx_timestamp(skb);
 
 		netdev_sent_queue(netdev, skb->len);
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+		e1000_maybe_stop_tx(tx_ring,
+				    (MAX_SKB_FRAGS *
+				     DIV_ROUND_UP(PAGE_SIZE,
+						  adapter->tx_fifo_limit) + 2));
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->hw.phy.autoneg_advertised = 0x2f;
 
 	/* ring size defaults */
-	adapter->rx_ring->count = 256;
-	adapter->tx_ring->count = 256;
+	adapter->rx_ring->count = E1000_DEFAULT_RXD;
+	adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
 	/*
 	 * Initial Wake on LAN setting - If APM wake is enabled in
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 			&ip_entry->ip4dst, &ip_entry->pdst);
 	if (rc != 0) {
 		rc = efx_filter_get_ipv4_full(
-			&spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
-			&ip_entry->ip4dst, &ip_entry->pdst);
+			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+			&ip_entry->ip4src, &ip_entry->psrc);
 		EFX_WARN_ON_PARANOID(rc);
 		ip_mask->ip4src = ~0;
 		ip_mask->psrc = ~0;
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
@@ -20,6 +20,10 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
 struct dma_desc {
 	/* Receive descriptor */
 	union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
 			      * is not calculated */
 	cic_full = 3,	     /* IP header and pseudoheader */
 };
+
+#endif /* __DESCS_H__ */
@@ -27,6 +27,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
 #if defined(CONFIG_STMMAC_RING)
 static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
 {
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
 	p->des01.tx.buffer1_size = len;
 }
 #endif
+
+#endif /* __DESC_COM_H__ */
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
 #include <linux/phy.h>
 #include "common.h"
 
@@ -119,3 +122,5 @@ enum ttc_control {
 #define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Couinter */
 
 extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
@@ -19,6 +19,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
 
 #include <linux/phy.h>
 #include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
 #define GMAC_MMC_RX_CSUM_OFFLOAD	0x208
 
 /* Synopsys Core versions */
-#define	DWMAC_CORE_3_40	34
+#define	DWMAC_CORE_3_40	0x34
 
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
 /* DMA CRS Control and Status Register Mapping */
 #define DMA_BUS_MODE		0x00001000	/* Bus Mode */
 #define DMA_XMT_POLL_DEMAND	0x00001004	/* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
 extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
 extern int dwmac_dma_interrupt(void __iomem *ioaddr,
 			       struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __MMC_H__
+#define __MMC_H__
+
 /* MMC control register */
 /* When set, all counter are reset */
 #define MMC_CNTRL_COUNTER_RESET		0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
 extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
 extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
 extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */
@@ -33,7 +33,7 @@
 #define MMC_TX_INTR		0x00000108	/* MMC TX Interrupt */
 #define MMC_RX_INTR_MASK	0x0000010c	/* MMC Interrupt Mask */
 #define MMC_TX_INTR_MASK	0x00000110	/* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK		0xffffffff
+#define MMC_DEFAULT_MASK	0xffffffff
 
 /* MMC TX counter registers */
 
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 /* To mask all all interrupts.*/
 void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
-	writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
-	writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actaully supported).
@@ -20,6 +20,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 #define DRV_MODULE_VERSION	"March_2012"
 
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
 {
 }
 #endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */
@@ -21,6 +21,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
 
 struct stmmac_timer {
 	void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
 extern int tmu2_register_user(void *fnt, void *data);
 extern void tmu2_unregister_user(void);
 #endif
+
+#endif /* __STMMAC_TIMER_H__ */
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct davinci_mdio_data *data = dev_get_drvdata(dev);
 
-	if (data->bus)
+	if (data->bus) {
 		mdiobus_unregister(data->bus);
+		mdiobus_free(data->bus);
+	}
 
 	if (data->clk)
 		clk_put(data->clk);
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
 			sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
 		break ;
 	case SMT_P_REASON :
-		* (u_long *) to = 0 ;
+		*(u32 *)to = 0 ;
 		sp_len = 4 ;
 		goto sp_done ;
 	case SMT_P1033 :			/* time stamp */
@@ -413,7 +413,9 @@ static const struct usb_device_id products[] = {
 
 	/* 5. Gobi 2000 and 3000 devices */
 	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
+	{QMI_GOBI_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
+	{QMI_GOBI_DEVICE(0x05c6, 0x920d)},	/* Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9245)},	/* Samsung Gobi 2000 Modem device (VL176) */
 	{QMI_GOBI_DEVICE(0x03f0, 0x251d)},	/* HP Gobi 2000 Modem device (VP412) */
@@ -441,6 +443,8 @@ static const struct usb_device_id products[] = {
 	{QMI_GOBI_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x901b)},	/* Sierra Wireless MC7770 */
+	{QMI_GOBI_DEVICE(0x12d1, 0x14f1)},	/* Sony Gobi 3000 Composite */
+	{QMI_GOBI_DEVICE(0x1410, 0xa021)},	/* Foxconn Gobi 3000 Modem device (Novatel E396) */
 
 	{ }					/* END */
 };
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
 			    netif_device_present(dev->net) &&
 			    !timer_pending(&dev->delay) &&
 			    !test_bit(EVENT_RX_HALT, &dev->flags))
-				rx_alloc_submit(dev, GFP_KERNEL);
+				rx_alloc_submit(dev, GFP_NOIO);
 
 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
 				netif_tx_wake_all_queues(dev->net);
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
 	case AR5K_EEPROM_MODE_11A:
 		offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
 		rate_pcal_info = ee->ee_rate_tpwr_a;
-		ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
+		ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
 		break;
 	case AR5K_EEPROM_MODE_11B:
 		offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
@@ -182,6 +182,7 @@
 #define AR5K_EEPROM_EEP_DELTA		10
 #define AR5K_EEPROM_N_MODES		3
 #define AR5K_EEPROM_N_5GHZ_CHAN		10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN	8
 #define AR5K_EEPROM_N_2GHZ_CHAN		3
 #define AR5K_EEPROM_N_2GHZ_CHAN_2413	4
 #define	AR5K_EEPROM_N_2GHZ_CHAN_MAX	4
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
 	/* dpc will not be rescheduled */
 	wl->resched = false;
 
+	/* inform publicly that interface is down */
+	wl->pub->up = false;
+
 	return 0;
 }
 
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
 		return;
 	}
 	len = ETH_ALEN;
-	ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+				  &len);
 	if (ret) {
 		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
 			       __LINE__);
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 	const struct fw_img *img;
 	size_t bufsz;
 
+	if (!iwl_is_ready_rf(priv))
+		return -EAGAIN;
+
 	/* default is to dump the entire data segment */
 	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
 		priv->dbgfs_sram_offset = 0x800000;
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 	}
 
 	iwl_dump_csr(trans);
-	iwl_dump_fh(trans, NULL, false);
+	iwl_dump_fh(trans, NULL);
 
 	iwl_op_mode_nic_error(trans->op_mode);
 }
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 {
 	int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-	int pos = 0;
-	size_t bufsz = 0;
-#endif
 	static const u32 fh_tbl[] = {
 		FH_RSCSR_CHNL0_STTS_WPTR_REG,
 		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
 		FH_TSSR_TX_STATUS_REG,
 		FH_TSSR_TX_ERROR_REG
 	};
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (display) {
-		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (buf) {
+		int pos = 0;
+		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
 		*buf = kmalloc(bufsz, GFP_KERNEL);
 		if (!*buf)
 			return -ENOMEM;
+
 		pos += scnprintf(*buf + pos, bufsz - pos,
 				 "FH register values:\n");
-		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
 			pos += scnprintf(*buf + pos, bufsz - pos,
 					 "  %34s: 0X%08x\n",
 					 get_fh_string(fh_tbl[i]),
 					 iwl_read_direct32(trans, fh_tbl[i]));
-		}
+
 		return pos;
 	}
 #endif
+
 	IWL_ERR(trans, "FH register values:\n");
-	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
 		IWL_ERR(trans, "  %34s: 0X%08x\n",
 			get_fh_string(fh_tbl[i]),
 			iwl_read_direct32(trans, fh_tbl[i]));
-	}
+
 	return 0;
 }
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 				     size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
-	char *buf;
+	char *buf = NULL;
 	int pos = 0;
 	ssize_t ret = -EFAULT;
 
-	ret = pos = iwl_dump_fh(trans, &buf, true);
+	ret = pos = iwl_dump_fh(trans, &buf);
 	if (buf) {
 		ret = simple_read_from_buffer(user_buf,
 					      count, ppos, buf, pos);
@@ -57,8 +57,7 @@
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
-	struct page *page;
-	unsigned offset;
+	int pull_to;
 };
 
 #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
 	struct sk_buff *skb;
 
 	while ((skb = __skb_dequeue(rxq)) != NULL) {
-		struct page *page = NETFRONT_SKB_CB(skb)->page;
-		void *vaddr = page_address(page);
-		unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-
-		memcpy(skb->data, vaddr + offset,
-		       skb_headlen(skb));
+		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
-			__free_page(page);
+		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	struct sk_buff_head errq;
 	struct sk_buff_head tmpq;
 	unsigned long flags;
-	unsigned int len;
 	int err;
 
 	spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
 			}
 		}
 
-		NETFRONT_SKB_CB(skb)->page =
-			skb_frag_page(&skb_shinfo(skb)->frags[0]);
-		NETFRONT_SKB_CB(skb)->offset = rx->offset;
+		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 
-		len = rx->status;
-		if (len > RX_COPY_THRESHOLD)
-			len = RX_COPY_THRESHOLD;
-		skb_put(skb, len);
-
-		if (rx->status > len) {
-			skb_shinfo(skb)->frags[0].page_offset =
-				rx->offset + len;
-			skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
-			skb->data_len = rx->status - len;
-		} else {
-			__skb_fill_page_desc(skb, 0, NULL, 0, 0);
-			skb_shinfo(skb)->nr_frags = 0;
-		}
+		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
+		skb->data_len = rx->status;
 
 		i = xennet_fill_frags(np, skb, &tmpq);
@@ -999,7 +980,7 @@ err:
 		 * receive throughout using the standard receive
 		 * buffer size was cut by 25%(!!!).
 		 */
-		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+		skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
 		skb->len += skb->data_len;
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
@@ -2149,7 +2149,7 @@
 #define PCI_DEVICE_ID_TIGON3_5704S	0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF	0x16a9
 #define PCI_DEVICE_ID_NX2_5706S		0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF	0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF	0x16a4
 #define PCI_DEVICE_ID_NX2_5708S		0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF	0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF	0x16ae
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
 	u16 ctmask;		/* bitmask of ct events to be delivered */
 	u16 expmask;		/* bitmask of expect events to be delivered */
 	u32 pid;		/* netlink pid of destroyer */
+	struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
 	struct napi_struct *napi;
 	int budget = 16;
 
-	WARN_ON_ONCE(!irqs_disabled());
-
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
-		local_irq_enable();
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			rcu_read_lock_bh();
 			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
 					       napi, budget);
-			rcu_read_unlock_bh();
 			spin_unlock(&napi->poll_lock);
 
-			if (!budget) {
-				local_irq_disable();
+			if (!budget)
 				break;
-			}
 		}
-		local_irq_disable();
 	}
 }
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 			 struct sk_buff *skb, struct mfc_cache *cache,
 			 int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 			      struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 
 	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
 		list_del(&mrt->list);
-		kfree(mrt);
+		ipmr_free_table(mrt);
 	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
-	kfree(net->ipv4.mrt);
+	ipmr_free_table(net->ipv4.mrt);
 }
 #endif
 
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 	return mrt;
 }
 
+static void ipmr_free_table(struct mr_table *mrt)
+{
+	del_timer_sync(&mrt->ipmr_expire_timer);
+	mroute_clean_tables(mrt);
+	kfree(mrt);
+}
+
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
 		ret = nf_ct_expect_related(rtcp_exp);
 		if (ret == 0)
 			break;
-		else if (ret != -EBUSY) {
+		else if (ret == -EBUSY) {
+			nf_ct_unexpect_related(rtp_exp);
+			continue;
+		} else if (ret < 0) {
 			nf_ct_unexpect_related(rtp_exp);
 			port = 0;
 			break;
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 	if (mtu < ip_rt_min_pmtu)
 		mtu = ip_rt_min_pmtu;
 
+	rcu_read_lock();
 	if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
 		struct fib_nh *nh = &FIB_RES_NH(res);
 
 		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
 				      jiffies + ip_rt_mtu_expires);
 	}
+	rcu_read_unlock();
 	return mtu;
 }
 
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 		dst->obsolete = DST_OBSOLETE_KILL;
 	} else {
 		rt->rt_pmtu = mtu;
-		dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+		rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
 	}
 }
 
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
 	struct rtable *rt = (struct rtable *) dst;
 
-	if (dst->flags & DST_NOCACHE) {
+	if (!list_empty(&rt->rt_uncached)) {
 		spin_lock_bh(&rt_uncached_lock);
 		list_del(&rt->rt_uncached);
 		spin_unlock_bh(&rt_uncached_lock);
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-				  int newly_acked_sacked, bool is_dupack,
+				  int prior_sacked, bool is_dupack,
 				  int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
+	int newly_acked_sacked = 0;
 	int fast_rexmit = 0;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 				tcp_add_reno_sack(sk);
 		} else
 			do_lost = tcp_try_undo_partial(sk, pkts_acked);
+		newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 		break;
 	case TCP_CA_Loss:
 		if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 			if (is_dupack)
 				tcp_add_reno_sack(sk);
 		}
+		newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 
 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
 			tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int prior_packets;
 	int prior_sacked = tp->sacked_out;
 	int pkts_acked = 0;
-	int newly_acked_sacked = 0;
 	bool frto_cwnd = false;
 
 	/* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
 		pkts_acked = prior_packets - tp->packets_out;
-		newly_acked_sacked = (prior_packets - prior_sacked) -
-				     (tp->packets_out - tp->sacked_out);
 
 		if (tp->frto_counter)
 			frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight);
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
 	if (flag & FLAG_DSACKING_ACK)
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	/* If this ack opens up a zero window, clear backoff.  It was
 	 * being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
 	 */
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-		newly_acked_sacked = tp->sacked_out - prior_sacked;
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	}
 
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct esp_data *esp = x->data;
 
 	/* skb is pure payload to encrypt */
-	err = -ENOMEM;
-
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);
 
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	}
 
 	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-	if (!tmp)
+	if (!tmp) {
+		err = -ENOMEM;
 		goto error;
+	}
 
 	seqhi = esp_tmp_seqhi(tmp);
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 	/* Remove from tunnel list */
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
 	list_del_rcu(&tunnel->list);
+	kfree_rcu(tunnel, rcu);
 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-	synchronize_rcu();
 
 	atomic_dec(&l2tp_tunnel_count);
-	kfree(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
 
 struct l2tp_tunnel {
 	int			magic;		/* Should be L2TP_TUNNEL_MAGIC */
+	struct rcu_head rcu;
 	rwlock_t		hlist_lock;	/* protect session_hlist */
 	struct hlist_head	session_hlist[L2TP_HASH_SIZE];
 						/* hashed list of sessions,
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 			meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
 					sdata, NULL, NULL);
 	} else {
-		int is_mesh_mcast = 1;
-		const u8 *mesh_da;
+		/* DS -> MBSS (802.11-2012 13.11.3.3).
+		 * For unicast with unknown forwarding information,
+		 * destination might be in the MBSS or if that fails
+		 * forwarded to another mesh gate. In either case
+		 * resolution will be handled in ieee80211_xmit(), so
+		 * leave the original DA. This also works for mcast */
+		const u8 *mesh_da = skb->data;
+
+		if (mppath)
+			mesh_da = mppath->mpp;
+		else if (mpath)
+			mesh_da = mpath->dst;
+		rcu_read_unlock();
 
-		if (is_multicast_ether_addr(skb->data))
-			/* DA TA mSA AE:SA */
-			mesh_da = skb->data;
-		else {
-			static const u8 bcast[ETH_ALEN] =
-				{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-			if (mppath) {
-				/* RA TA mDA mSA AE:DA SA */
-				mesh_da = mppath->mpp;
-				is_mesh_mcast = 0;
-			} else if (mpath) {
-				mesh_da = mpath->dst;
-				is_mesh_mcast = 0;
-			} else {
-				/* DA TA mSA AE:SA */
-				mesh_da = bcast;
-			}
-		}
 		hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 				mesh_da, sdata->vif.addr);
-		rcu_read_unlock();
-		if (is_mesh_mcast)
+		if (is_multicast_ether_addr(mesh_da))
 			/* DA TA mSA AE:SA */
 			meshhdrlen =
 				ieee80211_new_mesh_header(&mesh_hdr,
 						sdata,
 						skb->data + ETH_ALEN,
 						NULL);
 		else
 			/* RA TA mDA mSA AE:DA SA */
 			meshhdrlen =
 				ieee80211_new_mesh_header(&mesh_hdr,
 						sdata,
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 		goto out_err;
 	}
 	svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
-	if (!svc->stats.cpustats)
+	if (!svc->stats.cpustats) {
+		ret = -ENOMEM;
 		goto out_err;
+	}
 
 	/* I'm the first user of the service */
 	atomic_set(&svc->usecnt, 0);
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
 {
 	struct nf_conn *ct = (void *)ul_conntrack;
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);
 
 	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
 		/* bad luck, let's retry again */
-		ct->timeout.expires = jiffies +
+		ecache->timeout.expires = jiffies +
 			(random32() % net->ct.sysctl_events_retry_timeout);
-		add_timer(&ct->timeout);
+		add_timer(&ecache->timeout);
 		return;
 	}
 	/* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);
 
 	/* add this conntrack to the dying list */
 	spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
 		       &net->ct.dying);
 	spin_unlock_bh(&nf_conntrack_lock);
 	/* set a new timer to retry event delivery */
-	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-	ct->timeout.expires = jiffies +
+	setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+	ecache->timeout.expires = jiffies +
 		(random32() % net->ct.sysctl_events_retry_timeout);
-	add_timer(&ct->timeout);
+	add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
 		goto err_unreg_subsys;
 	}
 
-	if (register_pernet_subsys(&ctnetlink_net_ops)) {
+	ret = register_pernet_subsys(&ctnetlink_net_ops);
+	if (ret < 0) {
 		pr_err("ctnetlink_init: cannot register pernet operations\n");
 		goto err_unreg_exp_subsys;
 	}
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
 	}
 
 	if (indev && skb_mac_header_was_set(skb)) {
-		if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
 		    nla_put_be16(inst->skb, NFULA_HWLEN,
 				 htons(skb->dev->hard_header_len)) ||
 		    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)
 
 #ifdef CONFIG_PROC_FS
 	if (!proc_create("nfnetlink_log", 0440,
-			 proc_net_netfilter, &nful_file_ops))
+			 proc_net_netfilter, &nful_file_ops)) {
+		status = -ENOMEM;
 		goto cleanup_logger;
+	}
 #endif
 	return status;
 
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		dst_pid = addr->nl_pid;
 		dst_group = ffs(addr->nl_groups);
 		err = -EPERM;
-		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+		if ((dst_group || dst_pid) &&
+		    !netlink_capable(sock, NL_NONROOT_SEND))
 			goto out;
 	} else {
 		dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
 	nl_table[NETLINK_USERSOCK].registered = 1;
+	nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
 
 	netlink_table_ungrab();
 }
|
@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
|
|||
spin_unlock(&f->lock);
|
||||
}
|
||||
|
||||
bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
|
||||
static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
|
||||
{
|
||||
if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
|
||||
return true;
|
||||
|
|
|
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
 		goto error;
 
 	x->outer_mode = xfrm_get_mode(x->props.mode, family);
-	if (x->outer_mode == NULL)
+	if (x->outer_mode == NULL) {
+		err = -EPROTONOSUPPORT;
 		goto error;
+	}
 
 	if (init_replay) {
 		err = xfrm_init_replay(x);