Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) NLA_PUT* --> nla_put_* conversion got one case wrong in
    nfnetlink_log, fix from Patrick McHardy.

 2) Missed error return check in ipw2100 driver, from Julia Lawall.

 3) PMTU updates in ipv4 were setting the expiry time incorrectly, fix
    from Eric Dumazet.

 4) SFC driver erroneously reversed src and dst when reporting filters
    via ethtool.

 5) Memory leak in CAN protocol and wrong setting of IRQF_SHARED in
    sja1000 can platform driver, from Alexey Khoroshilov and Sven
    Schmitt.

 6) Fix multicast traffic scaling regression in ipv4_dst_destroy, only
    take the lock when we really need to.  From Eric Dumazet.

 7) Fix non-root process spoofing in netlink, from Pablo Neira Ayuso.

 8) CWND reduction in TCP is done incorrectly during non-SACK recovery,
    fix from Yuchung Cheng.

 9) Revert netpoll change, and fix what was actually a driver specific
    problem.  From Amerigo Wang.  This should cure bootup hangs with
    netconsole some people reported.

10) Fix xen-netfront invoking __skb_fill_page_desc() with a NULL page
    pointer.  From Ian Campbell.

11) SIP NAT fix for expectation creation, from Pablo Neira Ayuso.

12) __ip_rt_update_pmtu() needs RCU locking, from Eric Dumazet.

13) Fix usbnet deadlock on resume, can't use GFP_KERNEL in this
    situation.  From Oliver Neukum.

14) The davinci ethernet driver triggers an OOPS on removal because it
    frees an MDIO object before unregistering it.  Fix from Bin Liu.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  net: qmi_wwan: add several new Gobi devices
  fddi: 64 bit bug in smt_add_para()
  net: ethernet: fix kernel OOPS when remove davinci_mdio module
  net/xfrm/xfrm_state.c: fix error return code
  net: ipv6: fix error return code
  net: qmi_wwan: new device: Foxconn/Novatel E396
  usbnet: fix deadlock in resume
  cs89x0 : packet reception not working
  netfilter: nf_conntrack: fix racy timer handling with reliable events
  bnx2x: Correct the ndo_poll_controller call
  bnx2x: Move netif_napi_add to the open call
  ipv4: must use rcu protection while calling fib_lookup
  bnx2x: fix 57840_MF pci id
  net: ipv4: ipmr_expire_timer causes crash when removing net namespace
  e1000e: DoS while TSO enabled caused by link partner with small MSS
  l2tp: avoid to use synchronize_rcu in tunnel free function
  gianfar: fix default tx vlan offload feature flag
  netfilter: nf_nat_sip: fix incorrect handling of EBUSY for RTCP expectation
  xen-netfront: use __pskb_pull_tail to ensure linear area is big enough on RX
  netfilter: nfnetlink_log: fix error return code in init path
  ...
This commit is contained in:
Linus Torvalds 2012-09-02 11:28:00 -07:00
commit 0b1a34c992
55 changed files with 232 additions and 171 deletions

View File

@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev); priv = netdev_priv(dev);
dev->irq = res_irq->start; dev->irq = res_irq->start;
priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
priv->irq_flags |= IRQF_SHARED;
priv->reg_base = addr; priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */ /* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2; priv->can.clock.freq = pdata->osc_freq / 2;

View File

@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
const uint8_t *mem, *end, *dat; const uint8_t *mem, *end, *dat;
uint16_t type, len; uint16_t type, len;
uint32_t addr; uint32_t addr;
uint8_t *buf = NULL; uint8_t *buf = NULL, *new_buf;
int buflen = 0; int buflen = 0;
int8_t type_end = 0; int8_t type_end = 0;
@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
if (len > buflen) { if (len > buflen) {
/* align buflen */ /* align buflen */
buflen = (len + (1024-1)) & ~(1024-1); buflen = (len + (1024-1)) & ~(1024-1);
buf = krealloc(buf, buflen, GFP_KERNEL); new_buf = krealloc(buf, buflen, GFP_KERNEL);
if (!buf) { if (!new_buf) {
ret = -ENOMEM; ret = -ENOMEM;
goto failed; goto failed;
} }
buf = new_buf;
} }
/* verify record data */ /* verify record data */
memcpy_fromio(buf, &dpram[addr + offset], len); memcpy_fromio(buf, &dpram[addr + offset], len);

View File

@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
continue; \ continue; \
else else
#define for_each_napi_rx_queue(bp, var) \
for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
/* Skip OOO FP */ /* Skip OOO FP */
#define for_each_tx_queue(bp, var) \ #define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \

View File

@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
*/ */
bnx2x_setup_tc(bp->dev, bp->max_cos); bnx2x_setup_tc(bp->dev, bp->max_cos);
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
bnx2x_napi_enable(bp); bnx2x_napi_enable(bp);
/* set pf load just before approaching the MCP */ /* set pf load just before approaching the MCP */
@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Disable HW interrupts, NAPI */ /* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1); bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
/* Release IRQs */ /* Release IRQs */
bnx2x_free_irq(bp); bnx2x_free_irq(bp);

View File

@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
bp->num_napi_queues = bp->num_queues; bp->num_napi_queues = bp->num_queues;
/* Add NAPI objects */ /* Add NAPI objects */
for_each_napi_rx_queue(bp, i) for_each_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT); bnx2x_poll, BNX2X_NAPI_WEIGHT);
} }
@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{ {
int i; int i;
for_each_napi_rx_queue(bp, i) for_each_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi));
} }

View File

@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
*/ */
static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
{ {
bnx2x_del_all_napi(bp);
bnx2x_disable_msi(bp); bnx2x_disable_msi(bp);
BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
bnx2x_set_int_mode(bp); bnx2x_set_int_mode(bp);
bnx2x_add_all_napi(bp);
} }
/** /**

View File

@ -8427,6 +8427,8 @@ unload_error:
/* Disable HW interrupts, NAPI */ /* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1); bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
/* Release IRQs */ /* Release IRQs */
bnx2x_free_irq(bp); bnx2x_free_irq(bp);
@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void poll_bnx2x(struct net_device *dev) static void poll_bnx2x(struct net_device *dev)
{ {
struct bnx2x *bp = netdev_priv(dev); struct bnx2x *bp = netdev_priv(dev);
int i;
disable_irq(bp->pdev->irq); for_each_eth_queue(bp, i) {
bnx2x_interrupt(bp->pdev->irq, dev); struct bnx2x_fastpath *fp = &bp->fp[i];
enable_irq(bp->pdev->irq); napi_schedule(&bnx2x_fp(bp, fp->index, napi));
}
} }
#endif #endif
@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
*/ */
bnx2x_set_int_mode(bp); bnx2x_set_int_mode(bp);
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
rc = register_netdev(dev); rc = register_netdev(dev);
if (rc) { if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n"); dev_err(&pdev->dev, "Cannot register net device\n");
@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev); unregister_netdev(dev);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
/* Power on: we can't let PCI layer write to us while we are in D3 */ /* Power on: we can't let PCI layer write to us while we are in D3 */
bnx2x_set_power_state(bp, PCI_D0); bnx2x_set_power_state(bp, PCI_D0);
@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bnx2x_tx_disable(bp); bnx2x_tx_disable(bp);
bnx2x_netif_stop(bp, 0); bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
del_timer_sync(&bp->timer); del_timer_sync(&bp->timer);

View File

@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
{ {
struct net_local *lp = netdev_priv(dev); struct net_local *lp = netdev_priv(dev);
unsigned long flags; unsigned long flags;
u16 cfg;
spin_lock_irqsave(&lp->lock, flags); spin_lock_irqsave(&lp->lock, flags);
if (dev->flags & IFF_PROMISC) if (dev->flags & IFF_PROMISC)
@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
/* in promiscuous mode, we accept errored packets, /* in promiscuous mode, we accept errored packets,
* so we have to enable interrupts on them also * so we have to enable interrupts on them also
*/ */
writereg(dev, PP_RxCFG, cfg = lp->curr_rx_cfg;
(lp->curr_rx_cfg | if (lp->rx_mode == RX_ALL_ACCEPT)
(lp->rx_mode == RX_ALL_ACCEPT) cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) writereg(dev, PP_RxCFG, cfg);
: 0));
spin_unlock_irqrestore(&lp->lock, flags); spin_unlock_irqrestore(&lp->lock, flags);
} }

View File

@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
int num = 0, status = 0; int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock_bh(&adapter->mcc_cq_lock); spin_lock(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) { while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */ /* Interpret flags as an async trailer */
@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
if (num) if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
spin_unlock_bh(&adapter->mcc_cq_lock); spin_unlock(&adapter->mcc_cq_lock);
return status; return status;
} }
@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (be_error(adapter)) if (be_error(adapter))
return -EIO; return -EIO;
local_bh_disable();
status = be_process_mcc(adapter); status = be_process_mcc(adapter);
local_bh_enable();
if (atomic_read(&mcc_obj->q.used) == 0) if (atomic_read(&mcc_obj->q.used) == 0)
break; break;

View File

@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
/* when interrupts are not yet enabled, just reap any pending /* when interrupts are not yet enabled, just reap any pending
* mcc completions */ * mcc completions */
if (!netif_running(adapter->netdev)) { if (!netif_running(adapter->netdev)) {
local_bh_disable();
be_process_mcc(adapter); be_process_mcc(adapter);
local_bh_enable();
goto reschedule; goto reschedule;
} }

View File

@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_RX;
} }
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {

View File

@ -310,6 +310,7 @@ struct e1000_adapter {
*/ */
struct e1000_ring *tx_ring /* One per active queue */ struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp; ____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
struct napi_struct napi; struct napi_struct napi;

View File

@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
break; break;
} }
/*
* Alignment of Tx data is on an arbitrary byte boundary with the
* maximum size per Tx descriptor limited only to the transmit
* allocation of the packet buffer minus 96 bytes with an upper
* limit of 24KB due to receive synchronization limitations.
*/
adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
24 << 10);
/* /*
* Disable Adaptive Interrupt Moderation if 2 full packets cannot * Disable Adaptive Interrupt Moderation if 2 full packets cannot
* fit in receive buffer. * fit in receive buffer.
@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
return 1; return 1;
} }
#define E1000_MAX_PER_TXD 8192
#define E1000_MAX_TXD_PWR 12
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
unsigned int first, unsigned int max_per_txd, unsigned int first, unsigned int max_per_txd,
unsigned int nr_frags, unsigned int mss) unsigned int nr_frags)
{ {
struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev; struct pci_dev *pdev = adapter->pdev;
@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{ {
BUG_ON(size > tx_ring->count);
if (e1000_desc_unused(tx_ring) >= size) if (e1000_desc_unused(tx_ring) >= size)
return 0; return 0;
return __e1000_maybe_stop_tx(tx_ring, size); return __e1000_maybe_stop_tx(tx_ring, size);
} }
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev) struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int first; unsigned int first;
unsigned int max_per_txd = E1000_MAX_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0; unsigned int tx_flags = 0;
unsigned int len = skb_headlen(skb); unsigned int len = skb_headlen(skb);
unsigned int nr_frags; unsigned int nr_frags;
@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
} }
mss = skb_shinfo(skb)->gso_size; mss = skb_shinfo(skb)->gso_size;
/*
* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
* drops.
*/
if (mss) { if (mss) {
u8 hdr_len; u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
/* /*
* TSO Workaround for 82571/2/3 Controllers -- if skb->data * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count++; count++;
count++; count++;
count += TXD_USE_COUNT(len, max_txd_pwr); count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
nr_frags = skb_shinfo(skb)->nr_frags; nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) for (f = 0; f < nr_frags; f++)
count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr); adapter->tx_fifo_limit);
if (adapter->hw.mac.tx_pkt_filtering) if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb); e1000_transfer_dhcp_info(adapter, skb);
@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS; tx_flags |= E1000_TX_FLAGS_NO_FCS;
/* if count is 0 then mapping error has occurred */ /* if count is 0 then mapping error has occurred */
count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
nr_frags);
if (count) { if (count) {
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
netdev_sent_queue(netdev, skb->len); netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count); e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */ /* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); e1000_maybe_stop_tx(tx_ring,
(MAX_SKB_FRAGS *
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
} else { } else {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0; tx_ring->buffer_info[first].time_stamp = 0;
@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.autoneg_advertised = 0x2f; adapter->hw.phy.autoneg_advertised = 0x2f;
/* ring size defaults */ /* ring size defaults */
adapter->rx_ring->count = 256; adapter->rx_ring->count = E1000_DEFAULT_RXD;
adapter->tx_ring->count = 256; adapter->tx_ring->count = E1000_DEFAULT_TXD;
/* /*
* Initial Wake on LAN setting - If APM wake is enabled in * Initial Wake on LAN setting - If APM wake is enabled in

View File

@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
&ip_entry->ip4dst, &ip_entry->pdst); &ip_entry->ip4dst, &ip_entry->pdst);
if (rc != 0) { if (rc != 0) {
rc = efx_filter_get_ipv4_full( rc = efx_filter_get_ipv4_full(
&spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
&ip_entry->ip4dst, &ip_entry->pdst); &ip_entry->ip4src, &ip_entry->psrc);
EFX_WARN_ON_PARANOID(rc); EFX_WARN_ON_PARANOID(rc);
ip_mask->ip4src = ~0; ip_mask->ip4src = ~0;
ip_mask->psrc = ~0; ip_mask->psrc = ~0;

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __COMMON_H__
#define __COMMON_H__
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/phy.h> #include <linux/phy.h>
@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops; extern const struct stmmac_ring_mode_ops ring_mode_ops;
#endif /* __COMMON_H__ */

View File

@ -20,6 +20,10 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __DESCS_H__
#define __DESCS_H__
struct dma_desc { struct dma_desc {
/* Receive descriptor */ /* Receive descriptor */
union { union {
@ -166,3 +170,5 @@ enum tdes_csum_insertion {
* is not calculated */ * is not calculated */
cic_full = 3, /* IP header and pseudoheader */ cic_full = 3, /* IP header and pseudoheader */
}; };
#endif /* __DESCS_H__ */

View File

@ -27,6 +27,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __DESC_COM_H__
#define __DESC_COM_H__
#if defined(CONFIG_STMMAC_RING) #if defined(CONFIG_STMMAC_RING)
static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
{ {
@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.tx.buffer1_size = len; p->des01.tx.buffer1_size = len;
} }
#endif #endif
#endif /* __DESC_COM_H__ */

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __DWMAC100_H__
#define __DWMAC100_H__
#include <linux/phy.h> #include <linux/phy.h>
#include "common.h" #include "common.h"
@ -119,3 +122,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */
extern const struct stmmac_dma_ops dwmac100_dma_ops; extern const struct stmmac_dma_ops dwmac100_dma_ops;
#endif /* __DWMAC100_H__ */

View File

@ -19,6 +19,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __DWMAC1000_H__
#define __DWMAC1000_H__
#include <linux/phy.h> #include <linux/phy.h>
#include "common.h" #include "common.h"
@ -229,6 +231,7 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
/* Synopsys Core versions */ /* Synopsys Core versions */
#define DWMAC_CORE_3_40 34 #define DWMAC_CORE_3_40 0x34
extern const struct stmmac_dma_ops dwmac1000_dma_ops; extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __DWMAC_DMA_H__
#define __DWMAC_DMA_H__
/* DMA CRS Control and Status Register Mapping */ /* DMA CRS Control and Status Register Mapping */
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ #define DMA_BUS_MODE 0x00001000 /* Bus Mode */
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
extern void dwmac_dma_stop_rx(void __iomem *ioaddr); extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
extern int dwmac_dma_interrupt(void __iomem *ioaddr, extern int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x); struct stmmac_extra_stats *x);
#endif /* __DWMAC_DMA_H__ */

View File

@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __MMC_H__
#define __MMC_H__
/* MMC control register */ /* MMC control register */
/* When set, all counter are reset */ /* When set, all counter are reset */
#define MMC_CNTRL_COUNTER_RESET 0x1 #define MMC_CNTRL_COUNTER_RESET 0x1
@ -129,3 +132,5 @@ struct stmmac_counters {
extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
#endif /* __MMC_H__ */

View File

@ -33,7 +33,7 @@
#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ #define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ #define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ #define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
#define MMC_DEFAUL_MASK 0xffffffff #define MMC_DEFAULT_MASK 0xffffffff
/* MMC TX counter registers */ /* MMC TX counter registers */
@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
/* To mask all all interrupts.*/ /* To mask all all interrupts.*/
void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{ {
writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
} }
/* This reads the MAC core counters (if actaully supported). /* This reads the MAC core counters (if actaully supported).

View File

@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __STMMAC_H__
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth" #define STMMAC_RESOURCE_NAME "stmmaceth"
#define DRV_MODULE_VERSION "March_2012" #define DRV_MODULE_VERSION "March_2012"
@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
{ {
} }
#endif /* CONFIG_STMMAC_PCI */ #endif /* CONFIG_STMMAC_PCI */
#endif /* __STMMAC_H__ */

View File

@ -21,6 +21,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ *******************************************************************************/
#ifndef __STMMAC_TIMER_H__
#define __STMMAC_TIMER_H__
struct stmmac_timer { struct stmmac_timer {
void (*timer_start) (unsigned int new_freq); void (*timer_start) (unsigned int new_freq);
@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
extern int tmu2_register_user(void *fnt, void *data); extern int tmu2_register_user(void *fnt, void *data);
extern void tmu2_unregister_user(void); extern void tmu2_unregister_user(void);
#endif #endif
#endif /* __STMMAC_TIMER_H__ */

View File

@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct davinci_mdio_data *data = dev_get_drvdata(dev); struct davinci_mdio_data *data = dev_get_drvdata(dev);
if (data->bus) if (data->bus) {
mdiobus_unregister(data->bus);
mdiobus_free(data->bus); mdiobus_free(data->bus);
}
if (data->clk) if (data->clk)
clk_put(data->clk); clk_put(data->clk);

View File

@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
sm_pm_get_ls(smc,port_to_mib(smc,port))) ; sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
break ; break ;
case SMT_P_REASON : case SMT_P_REASON :
* (u_long *) to = 0 ; *(u32 *)to = 0 ;
sp_len = 4 ; sp_len = 4 ;
goto sp_done ; goto sp_done ;
case SMT_P1033 : /* time stamp */ case SMT_P1033 : /* time stamp */

View File

@ -413,7 +413,9 @@ static const struct usb_device_id products[] = {
/* 5. Gobi 2000 and 3000 devices */ /* 5. Gobi 2000 and 3000 devices */
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
@ -441,6 +443,8 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
{QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */
{ } /* END */ { } /* END */
}; };

View File

@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
netif_device_present(dev->net) && netif_device_present(dev->net) &&
!timer_pending(&dev->delay) && !timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) !test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_KERNEL); rx_alloc_submit(dev, GFP_NOIO);
if (!(dev->txq.qlen >= TX_QLEN(dev))) if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net); netif_tx_wake_all_queues(dev->net);

View File

@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
case AR5K_EEPROM_MODE_11A: case AR5K_EEPROM_MODE_11A:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
rate_pcal_info = ee->ee_rate_tpwr_a; rate_pcal_info = ee->ee_rate_tpwr_a;
ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
break; break;
case AR5K_EEPROM_MODE_11B: case AR5K_EEPROM_MODE_11B:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);

View File

@ -182,6 +182,7 @@
#define AR5K_EEPROM_EEP_DELTA 10 #define AR5K_EEPROM_EEP_DELTA 10
#define AR5K_EEPROM_N_MODES 3 #define AR5K_EEPROM_N_MODES 3
#define AR5K_EEPROM_N_5GHZ_CHAN 10 #define AR5K_EEPROM_N_5GHZ_CHAN 10
#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
#define AR5K_EEPROM_N_2GHZ_CHAN 3 #define AR5K_EEPROM_N_2GHZ_CHAN 3
#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 #define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 #define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4

View File

@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
/* dpc will not be rescheduled */ /* dpc will not be rescheduled */
wl->resched = false; wl->resched = false;
/* inform publicly that interface is down */
wl->pub->up = false;
return 0; return 0;
} }

View File

@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
return; return;
} }
len = ETH_ALEN; len = ETH_ALEN;
ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
&len);
if (ret) { if (ret) {
IPW_DEBUG_INFO("failed querying ordinals at line %d\n", IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
__LINE__); __LINE__);

View File

@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
const struct fw_img *img; const struct fw_img *img;
size_t bufsz; size_t bufsz;
if (!iwl_is_ready_rf(priv))
return -EAGAIN;
/* default is to dump the entire data segment */ /* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
priv->dbgfs_sram_offset = 0x800000; priv->dbgfs_sram_offset = 0x800000;

View File

@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
/***************************************************** /*****************************************************
* Error handling * Error handling
******************************************************/ ******************************************************/
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); int iwl_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_dump_csr(struct iwl_trans *trans); void iwl_dump_csr(struct iwl_trans *trans);
/***************************************************** /*****************************************************

View File

@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
} }
iwl_dump_csr(trans); iwl_dump_csr(trans);
iwl_dump_fh(trans, NULL, false); iwl_dump_fh(trans, NULL);
iwl_op_mode_nic_error(trans->op_mode); iwl_op_mode_nic_error(trans->op_mode);
} }

View File

@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
#undef IWL_CMD #undef IWL_CMD
} }
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{ {
int i; int i;
#ifdef CONFIG_IWLWIFI_DEBUG
int pos = 0;
size_t bufsz = 0;
#endif
static const u32 fh_tbl[] = { static const u32 fh_tbl[] = {
FH_RSCSR_CHNL0_STTS_WPTR_REG, FH_RSCSR_CHNL0_STTS_WPTR_REG,
FH_RSCSR_CHNL0_RBDCB_BASE_REG, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_ERROR_REG FH_TSSR_TX_ERROR_REG
}; };
#ifdef CONFIG_IWLWIFI_DEBUG
if (display) { #ifdef CONFIG_IWLWIFI_DEBUGFS
bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; if (buf) {
int pos = 0;
size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
*buf = kmalloc(bufsz, GFP_KERNEL); *buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf) if (!*buf)
return -ENOMEM; return -ENOMEM;
pos += scnprintf(*buf + pos, bufsz - pos, pos += scnprintf(*buf + pos, bufsz - pos,
"FH register values:\n"); "FH register values:\n");
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
pos += scnprintf(*buf + pos, bufsz - pos, pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n", " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]), get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i])); iwl_read_direct32(trans, fh_tbl[i]));
}
return pos; return pos;
} }
#endif #endif
IWL_ERR(trans, "FH register values:\n"); IWL_ERR(trans, "FH register values:\n");
for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
IWL_ERR(trans, " %34s: 0X%08x\n", IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]), get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i])); iwl_read_direct32(trans, fh_tbl[i]));
}
return 0; return 0;
} }
@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct iwl_trans *trans = file->private_data; struct iwl_trans *trans = file->private_data;
char *buf; char *buf = NULL;
int pos = 0; int pos = 0;
ssize_t ret = -EFAULT; ssize_t ret = -EFAULT;
ret = pos = iwl_dump_fh(trans, &buf, true); ret = pos = iwl_dump_fh(trans, &buf);
if (buf) { if (buf) {
ret = simple_read_from_buffer(user_buf, ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos); count, ppos, buf, pos);

View File

@ -57,8 +57,7 @@
static const struct ethtool_ops xennet_ethtool_ops; static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb { struct netfront_cb {
struct page *page; int pull_to;
unsigned offset;
}; };
#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
struct sk_buff *skb; struct sk_buff *skb;
while ((skb = __skb_dequeue(rxq)) != NULL) { while ((skb = __skb_dequeue(rxq)) != NULL) {
struct page *page = NETFRONT_SKB_CB(skb)->page; int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
void *vaddr = page_address(page);
unsigned offset = NETFRONT_SKB_CB(skb)->offset;
memcpy(skb->data, vaddr + offset, __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
skb_headlen(skb));
if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
__free_page(page);
/* Ethernet work: Delayed to here as it peeks the header. */ /* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev); skb->protocol = eth_type_trans(skb, dev);
@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head errq; struct sk_buff_head errq;
struct sk_buff_head tmpq; struct sk_buff_head tmpq;
unsigned long flags; unsigned long flags;
unsigned int len;
int err; int err;
spin_lock(&np->rx_lock); spin_lock(&np->rx_lock);
@ -955,24 +947,13 @@ err:
} }
} }
NETFRONT_SKB_CB(skb)->page = NETFRONT_SKB_CB(skb)->pull_to = rx->status;
skb_frag_page(&skb_shinfo(skb)->frags[0]); if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
NETFRONT_SKB_CB(skb)->offset = rx->offset; NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
len = rx->status; skb_shinfo(skb)->frags[0].page_offset = rx->offset;
if (len > RX_COPY_THRESHOLD) skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
len = RX_COPY_THRESHOLD; skb->data_len = rx->status;
skb_put(skb, len);
if (rx->status > len) {
skb_shinfo(skb)->frags[0].page_offset =
rx->offset + len;
skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
skb->data_len = rx->status - len;
} else {
__skb_fill_page_desc(skb, 0, NULL, 0, 0);
skb_shinfo(skb)->nr_frags = 0;
}
i = xennet_fill_frags(np, skb, &tmpq); i = xennet_fill_frags(np, skb, &tmpq);
@ -999,7 +980,7 @@ err:
* receive throughout using the standard receive * receive throughout using the standard receive
* buffer size was cut by 25%(!!!). * buffer size was cut by 25%(!!!).
*/ */
skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
skb->len += skb->data_len; skb->len += skb->data_len;
if (rx->flags & XEN_NETRXF_csum_blank) if (rx->flags & XEN_NETRXF_csum_blank)

View File

@ -2149,7 +2149,7 @@
#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
#define PCI_DEVICE_ID_NX2_5706S 0x16aa #define PCI_DEVICE_ID_NX2_5706S 0x16aa
#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab #define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
#define PCI_DEVICE_ID_NX2_5708S 0x16ac #define PCI_DEVICE_ID_NX2_5708S 0x16ac
#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae

View File

@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
u16 ctmask; /* bitmask of ct events to be delivered */ u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */
u32 pid; /* netlink pid of destroyer */ u32 pid; /* netlink pid of destroyer */
struct timer_list timeout;
}; };
static inline struct nf_conntrack_ecache * static inline struct nf_conntrack_ecache *

View File

@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi; struct napi_struct *napi;
int budget = 16; int budget = 16;
WARN_ON_ONCE(!irqs_disabled());
list_for_each_entry(napi, &dev->napi_list, dev_list) { list_for_each_entry(napi, &dev->napi_list, dev_list) {
local_irq_enable();
if (napi->poll_owner != smp_processor_id() && if (napi->poll_owner != smp_processor_id() &&
spin_trylock(&napi->poll_lock)) { spin_trylock(&napi->poll_lock)) {
rcu_read_lock_bh();
budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
napi, budget); napi, budget);
rcu_read_unlock_bh();
spin_unlock(&napi->poll_lock); spin_unlock(&napi->poll_lock);
if (!budget) { if (!budget)
local_irq_disable();
break; break;
}
} }
local_irq_disable();
} }
} }

View File

@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly; static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id); static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);
static int ip_mr_forward(struct net *net, struct mr_table *mrt, static int ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache, struct sk_buff *skb, struct mfc_cache *cache,
int local); int local);
@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert); struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm); struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg); static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list); list_del(&mrt->list);
kfree(mrt); ipmr_free_table(mrt);
} }
fib_rules_unregister(net->ipv4.mr_rules_ops); fib_rules_unregister(net->ipv4.mr_rules_ops);
} }
@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net) static void __net_exit ipmr_rules_exit(struct net *net)
{ {
kfree(net->ipv4.mrt); ipmr_free_table(net->ipv4.mrt);
} }
#endif #endif
@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
return mrt; return mrt;
} }
static void ipmr_free_table(struct mr_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt);
kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)

View File

@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
ret = nf_ct_expect_related(rtcp_exp); ret = nf_ct_expect_related(rtcp_exp);
if (ret == 0) if (ret == 0)
break; break;
else if (ret != -EBUSY) { else if (ret == -EBUSY) {
nf_ct_unexpect_related(rtp_exp);
continue;
} else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp); nf_ct_unexpect_related(rtp_exp);
port = 0; port = 0;
break; break;

View File

@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (mtu < ip_rt_min_pmtu) if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu; mtu = ip_rt_min_pmtu;
rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res); struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu, update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
jiffies + ip_rt_mtu_expires); jiffies + ip_rt_mtu_expires);
} }
rcu_read_unlock();
return mtu; return mtu;
} }
@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
dst->obsolete = DST_OBSOLETE_KILL; dst->obsolete = DST_OBSOLETE_KILL;
} else { } else {
rt->rt_pmtu = mtu; rt->rt_pmtu = mtu;
dst_set_expires(&rt->dst, ip_rt_mtu_expires); rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
} }
} }
@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
{ {
struct rtable *rt = (struct rtable *) dst; struct rtable *rt = (struct rtable *) dst;
if (dst->flags & DST_NOCACHE) { if (!list_empty(&rt->rt_uncached)) {
spin_lock_bh(&rt_uncached_lock); spin_lock_bh(&rt_uncached_lock);
list_del(&rt->rt_uncached); list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock); spin_unlock_bh(&rt_uncached_lock);

View File

@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
* tcp_xmit_retransmit_queue(). * tcp_xmit_retransmit_queue().
*/ */
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
int newly_acked_sacked, bool is_dupack, int prior_sacked, bool is_dupack,
int flag) int flag)
{ {
struct inet_connection_sock *icsk = inet_csk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering)); (tcp_fackets_out(tp) > tp->reordering));
int newly_acked_sacked = 0;
int fast_rexmit = 0; int fast_rexmit = 0;
if (WARN_ON(!tp->packets_out && tp->sacked_out)) if (WARN_ON(!tp->packets_out && tp->sacked_out))
@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk); tcp_add_reno_sack(sk);
} else } else
do_lost = tcp_try_undo_partial(sk, pkts_acked); do_lost = tcp_try_undo_partial(sk, pkts_acked);
newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
break; break;
case TCP_CA_Loss: case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED) if (flag & FLAG_DATA_ACKED)
@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (is_dupack) if (is_dupack)
tcp_add_reno_sack(sk); tcp_add_reno_sack(sk);
} }
newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder) if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk); tcp_try_undo_dsack(sk);
@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_packets; int prior_packets;
int prior_sacked = tp->sacked_out; int prior_sacked = tp->sacked_out;
int pkts_acked = 0; int pkts_acked = 0;
int newly_acked_sacked = 0;
bool frto_cwnd = false; bool frto_cwnd = false;
/* If the ack is older than previous acks /* If the ack is older than previous acks
@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
pkts_acked = prior_packets - tp->packets_out; pkts_acked = prior_packets - tp->packets_out;
newly_acked_sacked = (prior_packets - prior_sacked) -
(tp->packets_out - tp->sacked_out);
if (tp->frto_counter) if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag); frto_cwnd = tcp_process_frto(sk, flag);
@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_may_raise_cwnd(sk, flag)) tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight); tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag); is_dupack, flag);
} else { } else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
no_queue: no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */ /* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK) if (flag & FLAG_DSACKING_ACK)
tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag); is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was /* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than * being used to time the probes, and is probably far higher than
@ -3718,8 +3718,7 @@ old_ack:
*/ */
if (TCP_SKB_CB(skb)->sacked) { if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
newly_acked_sacked = tp->sacked_out - prior_sacked; tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
is_dupack, flag); is_dupack, flag);
} }

View File

@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct esp_data *esp = x->data; struct esp_data *esp = x->data;
/* skb is pure payload to encrypt */ /* skb is pure payload to encrypt */
err = -ENOMEM;
aead = esp->aead; aead = esp->aead;
alen = crypto_aead_authsize(aead); alen = crypto_aead_authsize(aead);
@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
} }
tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
if (!tmp) if (!tmp) {
err = -ENOMEM;
goto error; goto error;
}
seqhi = esp_tmp_seqhi(tmp); seqhi = esp_tmp_seqhi(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen); iv = esp_tmp_iv(aead, tmp, seqhilen);

View File

@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
/* Remove from tunnel list */ /* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock); spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_del_rcu(&tunnel->list); list_del_rcu(&tunnel->list);
kfree_rcu(tunnel, rcu);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock); spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
synchronize_rcu();
atomic_dec(&l2tp_tunnel_count); atomic_dec(&l2tp_tunnel_count);
kfree(tunnel);
} }
/* Create a socket for the tunnel, if one isn't set up by /* Create a socket for the tunnel, if one isn't set up by

View File

@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel { struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */ int magic; /* Should be L2TP_TUNNEL_MAGIC */
struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */ rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE]; struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions, /* hashed list of sessions,

View File

@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
sdata, NULL, NULL); sdata, NULL, NULL);
} else { } else {
int is_mesh_mcast = 1; /* DS -> MBSS (802.11-2012 13.11.3.3).
const u8 *mesh_da; * For unicast with unknown forwarding information,
* destination might be in the MBSS or if that fails
* forwarded to another mesh gate. In either case
* resolution will be handled in ieee80211_xmit(), so
* leave the original DA. This also works for mcast */
const u8 *mesh_da = skb->data;
if (mppath)
mesh_da = mppath->mpp;
else if (mpath)
mesh_da = mpath->dst;
rcu_read_unlock();
if (is_multicast_ether_addr(skb->data))
/* DA TA mSA AE:SA */
mesh_da = skb->data;
else {
static const u8 bcast[ETH_ALEN] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
if (mppath) {
/* RA TA mDA mSA AE:DA SA */
mesh_da = mppath->mpp;
is_mesh_mcast = 0;
} else if (mpath) {
mesh_da = mpath->dst;
is_mesh_mcast = 0;
} else {
/* DA TA mSA AE:SA */
mesh_da = bcast;
}
}
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
mesh_da, sdata->vif.addr); mesh_da, sdata->vif.addr);
rcu_read_unlock(); if (is_multicast_ether_addr(mesh_da))
if (is_mesh_mcast) /* DA TA mSA AE:SA */
meshhdrlen = meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr, ieee80211_new_mesh_header(&mesh_hdr,
sdata, sdata,
skb->data + ETH_ALEN, skb->data + ETH_ALEN,
NULL); NULL);
else else
/* RA TA mDA mSA AE:DA SA */
meshhdrlen = meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr, ieee80211_new_mesh_header(&mesh_hdr,
sdata, sdata,

View File

@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
goto out_err; goto out_err;
} }
svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!svc->stats.cpustats) if (!svc->stats.cpustats) {
ret = -ENOMEM;
goto out_err; goto out_err;
}
/* I'm the first user of the service */ /* I'm the first user of the service */
atomic_set(&svc->usecnt, 0); atomic_set(&svc->usecnt, 0);

View File

@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
{ {
struct nf_conn *ct = (void *)ul_conntrack; struct nf_conn *ct = (void *)ul_conntrack;
struct net *net = nf_ct_net(ct); struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
BUG_ON(ecache == NULL);
if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
/* bad luck, let's retry again */ /* bad luck, let's retry again */
ct->timeout.expires = jiffies + ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout); (random32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ct->timeout); add_timer(&ecache->timeout);
return; return;
} }
/* we've got the event delivered, now it's dying */ /* we've got the event delivered, now it's dying */
@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
void nf_ct_insert_dying_list(struct nf_conn *ct) void nf_ct_insert_dying_list(struct nf_conn *ct)
{ {
struct net *net = nf_ct_net(ct); struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
BUG_ON(ecache == NULL);
/* add this conntrack to the dying list */ /* add this conntrack to the dying list */
spin_lock_bh(&nf_conntrack_lock); spin_lock_bh(&nf_conntrack_lock);
@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
&net->ct.dying); &net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock); spin_unlock_bh(&nf_conntrack_lock);
/* set a new timer to retry event delivery */ /* set a new timer to retry event delivery */
setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
ct->timeout.expires = jiffies + ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout); (random32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ct->timeout); add_timer(&ecache->timeout);
} }
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);

View File

@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys; goto err_unreg_subsys;
} }
if (register_pernet_subsys(&ctnetlink_net_ops)) { ret = register_pernet_subsys(&ctnetlink_net_ops);
if (ret < 0) {
pr_err("ctnetlink_init: cannot register pernet operations\n"); pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys; goto err_unreg_exp_subsys;
} }

View File

@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
} }
if (indev && skb_mac_header_was_set(skb)) { if (indev && skb_mac_header_was_set(skb)) {
if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
nla_put_be16(inst->skb, NFULA_HWLEN, nla_put_be16(inst->skb, NFULA_HWLEN,
htons(skb->dev->hard_header_len)) || htons(skb->dev->hard_header_len)) ||
nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
if (!proc_create("nfnetlink_log", 0440, if (!proc_create("nfnetlink_log", 0440,
proc_net_netfilter, &nful_file_ops)) proc_net_netfilter, &nful_file_ops)) {
status = -ENOMEM;
goto cleanup_logger; goto cleanup_logger;
}
#endif #endif
return status; return status;

View File

@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
dst_pid = addr->nl_pid; dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups); dst_group = ffs(addr->nl_groups);
err = -EPERM; err = -EPERM;
if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) if ((dst_group || dst_pid) &&
!netlink_capable(sock, NL_NONROOT_SEND))
goto out; goto out;
} else { } else {
dst_pid = nlk->dst_pid; dst_pid = nlk->dst_pid;
@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE; nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1; nl_table[NETLINK_USERSOCK].registered = 1;
nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
netlink_table_ungrab(); netlink_table_ungrab();
} }

View File

@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
spin_unlock(&f->lock); spin_unlock(&f->lock);
} }
bool match_fanout_group(struct packet_type *ptype, struct sock * sk) static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
{ {
if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
return true; return true;

View File

@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
goto error; goto error;
x->outer_mode = xfrm_get_mode(x->props.mode, family); x->outer_mode = xfrm_get_mode(x->props.mode, family);
if (x->outer_mode == NULL) if (x->outer_mode == NULL) {
err = -EPROTONOSUPPORT;
goto error; goto error;
}
if (init_replay) { if (init_replay) {
err = xfrm_init_replay(x); err = xfrm_init_replay(x);