Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Work around a hw bug when acquiring PCI bus ownership of iwlwifi
    devices, from Emmanuel Grumbach.

 2) Falling back to vmalloc in conntrack should not emit a warning, from
    Pablo Neira Ayuso.  (See the sketch after this list.)

 3) Fix NULL deref when rtlwifi driver is used as an AP, from Luis
    Felipe Dominguez Vega.

 4) Rocker doesn't free netdev on device removal, from Ido Schimmel.

 5) UDP multicast early sock demux has route handling races, from Eric
    Dumazet.

 6) Fix L4 checksum handling in openvswitch, from Glenn Griffin.

 7) Fix use-after-free in skb_set_peeked, from Herbert Xu.

 8) Don't advertise NETIF_F_FRAGLIST in the virtio_net driver; this can
    lead to fraglists longer than the driver can support.  From Jason Wang.

 9) Fix mlx5 on non-4k-pagesize systems, from Carol L Soto.

10) Fix interrupt storm in bna driver, from Ivan Vecera.

11) Don't propagate -EBUSY from netlink_insert(), from Daniel Borkmann.

12) Fix inet request sock leak, from Eric Dumazet.

13) Fix TX interrupt masking and marking in TX descriptors of fs_enet
    driver, from LEROY Christophe.

14) Get rid of the rule optimizer in the gianfar driver; it's buggy and
    unlikely to get fixed any time soon.  From Jakub Kicinski.
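
A note on item 2, since the pattern comes up often: the usual way to fall
back from kmalloc() to vmalloc() without tripping a page-allocation warning
is to pass __GFP_NOWARN on the first attempt.  A minimal sketch of that
idiom (illustrative only, not the exact nf_conntrack code):

    /* Illustrative fallback allocator -- not the exact conntrack code. */
    static void *alloc_hashtable(unsigned int nr_slots)
    {
            size_t sz = nr_slots * sizeof(struct hlist_nulls_head);
            void *hash;

            /* Large contiguous allocations are expected to fail on
             * fragmented memory; __GFP_NOWARN keeps that failure silent.
             */
            hash = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
            if (!hash)
                    hash = vzalloc(sz); /* virtually contiguous fallback */

            return hash;
    }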

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
  cosa: missing error code on failure in probe()
  gianfar: remove faulty filer optimizer
  gianfar: correct list membership accounting
  gianfar: correct filer table writing
  bonding: Gratuitous ARP gets dropped when first slave added
  net: dsa: Do not override PHY interface if already configured
  net: fs_enet: mask interrupts for TX partial frames.
  net: fs_enet: explicitly remove I flag on TX partial frames
  inet: fix possible request socket leak
  inet: fix races with reqsk timers
  mkiss: Fix error handling in mkiss_open()
  bnx2x: Free NVRAM lock at end of each page
  bnx2x: Prevent null pointer dereference on SKB release
  cxgb4: missing curly braces in t4_setup_debugfs()
  net-timestamp: Update skb_complete_tx_timestamp comment
  ipv6: don't reject link-local nexthop on other interface
  netlink: make sure -EBUSY won't escape from netlink_insert
  bna: fix interrupts storm caused by erroneous packets
  net: mvpp2: replace TX coalescing interrupts with hrtimer
  net: mvpp2: enable proper per-CPU TX buffers unmapping
  ...
Linus Torvalds 2015-08-13 10:46:39 -07:00
commit 26b552e0a8
59 changed files with 590 additions and 633 deletions

View File

@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
slave ? slave->dev->name : "NULL"); slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_peer_notif || if (!slave || !bond->send_peer_notif ||
!netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false; return false;

View File

@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
} }
if (i != RX_RING_SIZE) { if (i != RX_RING_SIZE) {
int j;
pr_emerg("%s: no memory for rx ring\n", dev->name); pr_emerg("%s: no memory for rx ring\n", dev->name);
for (j = 0; j < i; j++) {
if (vp->rx_skbuff[j]) {
dev_kfree_skb(vp->rx_skbuff[j]);
vp->rx_skbuff[j] = NULL;
}
}
retval = -ENOMEM; retval = -ENOMEM;
goto err_free_irq; goto err_free_skb;
} }
/* Wrap the ring. */ /* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
if (!retval) if (!retval)
goto out; goto out;
err_free_irq: err_free_skb:
for (i = 0; i < RX_RING_SIZE; i++) {
if (vp->rx_skbuff[i]) {
dev_kfree_skb(vp->rx_skbuff[i]);
vp->rx_skbuff[i] = NULL;
}
}
free_irq(dev->irq, dev); free_irq(dev->irq, dev);
err: err:
if (vortex_debug > 1) if (vortex_debug > 1)

View File

@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
if (likely(skb)) { if (likely(skb)) {
(*pkts_compl)++; (*pkts_compl)++;
(*bytes_compl) += skb->len; (*bytes_compl) += skb->len;
dev_kfree_skb_any(skb);
} }
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0; tx_buf->first_bd = 0;
tx_buf->skb = NULL; tx_buf->skb = NULL;

View File

@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
offset += sizeof(u32); offset += sizeof(u32);
data_buf += sizeof(u32); data_buf += sizeof(u32);
written_so_far += sizeof(u32); written_so_far += sizeof(u32);
/* At end of each 4Kb page, release nvram lock to allow MFW
* chance to take it for its own use.
*/
if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
(written_so_far < buf_size)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"Releasing NVM lock after offset 0x%x\n",
(u32)(offset - sizeof(u32)));
bnx2x_release_nvram_lock(bp);
usleep_range(1000, 2000);
rc = bnx2x_acquire_nvram_lock(bp);
if (rc)
return rc;
}
cmd_flags = 0; cmd_flags = 0;
} }
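
Condensing the hunk above (my sketch of the resulting loop shape; the
dword-write step itself is a driver helper the hunk does not show):

    while (written_so_far < buf_size) {
            /* ... write the next dword via the driver's helper ... */

            offset += sizeof(u32);
            data_buf += sizeof(u32);
            written_so_far += sizeof(u32);

            /* At each 4Kb page boundary, drop the NVRAM lock briefly so
             * the management firmware (MFW) gets a chance to take it.
             */
            if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
                written_so_far < buf_size) {
                    bnx2x_release_nvram_lock(bp);
                    usleep_range(1000, 2000);
                    rc = bnx2x_acquire_nvram_lock(bp);
                    if (rc)
                            return rc;
            }
            cmd_flags = 0;
    }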

View File

@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (!next_cmpl->valid) if (!next_cmpl->valid)
break; break;
} }
packets++;
/* TODO: BNA_CQ_EF_LOCAL ? */ /* TODO: BNA_CQ_EF_LOCAL ? */
if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
packets++;
rcb->rxq->rx_packets++; rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen; rcb->rxq->rx_bytes += totlen;
ccb->bytes_per_intr += totlen; ccb->bytes_per_intr += totlen;

View File

@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF config THUNDER_NIC_PF
tristate "Thunder Physical function driver" tristate "Thunder Physical function driver"
depends on 64BIT depends on 64BIT
default ARCH_THUNDER
select THUNDER_NIC_BGX select THUNDER_NIC_BGX
---help--- ---help---
This driver supports Thunder's NIC physical function. This driver supports Thunder's NIC physical function.
@ -29,14 +28,12 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF config THUNDER_NIC_VF
tristate "Thunder Virtual function driver" tristate "Thunder Virtual function driver"
depends on 64BIT depends on 64BIT
default ARCH_THUNDER
---help--- ---help---
This driver supports Thunder's NIC virtual function This driver supports Thunder's NIC virtual function
config THUNDER_NIC_BGX config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)" tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT depends on 64BIT
default ARCH_THUNDER
---help--- ---help---
This driver supports programming and controlling of MAC This driver supports programming and controlling of MAC
interface from NIC physical function driver. interface from NIC physical function driver.

View File

@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
EXT_MEM1_SIZE_G(size)); EXT_MEM1_SIZE_G(size));
} }
} else { } else {
if (i & EXT_MEM_ENABLE_F) if (i & EXT_MEM_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
add_debugfs_mem(adap, "mc", MEM_MC, add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size)); EXT_MEM_SIZE_G(size));
}
} }
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,

View File

@ -620,6 +620,11 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS) BE_IF_FLAGS_MCAST_PROMISCUOUS)
#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
/* An RX interface is an object with one or more MAC addresses and /* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */ * filtering capabilities. */
struct be_cmd_req_if_create { struct be_cmd_req_if_create {

View File

@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
return 0; return 0;
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address. * privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the * On BE3, this cmd will always fail if the VF doesn't have the
@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM; status = -EPERM;
goto err; goto err;
} }
done:
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ether_addr_copy(netdev->dev_addr, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", mac); dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0; return 0;
err: err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
} }
/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
while (atomic_read(&rxq->used) > 0) {
page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(atomic_read(&rxq->used));
rxq->tail = 0;
rxq->head = 0;
}
static void be_rx_cq_clean(struct be_rx_obj *rxo) static void be_rx_cq_clean(struct be_rx_obj *rxo)
{ {
struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
struct be_queue_info *rx_cq = &rxo->cq; struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp; struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter; struct be_adapter *adapter = rxo->adapter;
@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
/* After cleanup, leave the CQ in unarmed state */ /* After cleanup, leave the CQ in unarmed state */
be_cq_notify(adapter, rx_cq->id, false, 0); be_cq_notify(adapter, rx_cq->id, false, 0);
/* Then free posted rx buffers that were not used */
while (atomic_read(&rxq->used) > 0) {
page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(atomic_read(&rxq->used));
rxq->tail = 0;
rxq->head = 0;
} }
static void be_tx_compl_clean(struct be_adapter *adapter) static void be_tx_compl_clean(struct be_adapter *adapter)
@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
napi_hash_del(&eqo->napi); napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi); netif_napi_del(&eqo->napi);
free_cpumask_var(eqo->affinity_mask);
} }
free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q); be_queue_free(adapter, &eqo->q);
} }
} }
@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) { for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev); int numa_node = dev_to_node(&adapter->pdev->dev);
if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
BE_NAPI_WEIGHT);
napi_hash_add(&eqo->napi);
aic = &adapter->aic_obj[i]; aic = &adapter->aic_obj[i];
eqo->adapter = adapter; eqo->adapter = adapter;
eqo->idx = i; eqo->idx = i;
@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
rc = be_cmd_eq_create(adapter, eqo); rc = be_cmd_eq_create(adapter, eqo);
if (rc) if (rc)
return rc; return rc;
if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
BE_NAPI_WEIGHT);
napi_hash_add(&eqo->napi);
} }
return 0; return 0;
} }
@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q; q = &rxo->q;
if (q->created) { if (q->created) {
/* If RXQs are destroyed while in an "out of buffer"
* state, there is a possibility of an HW stall on
* Lancer. So, post 64 buffers to each queue to relieve
* the "out of buffer" condition.
* Make sure there's space in the RXQ before posting.
*/
if (lancer_chip(adapter)) {
be_rx_cq_clean(rxo);
if (atomic_read(&q->used) == 0)
be_post_rx_frags(rxo, GFP_KERNEL,
MAX_RX_POST);
}
be_cmd_rxq_destroy(adapter, q); be_cmd_rxq_destroy(adapter, q);
be_rx_cq_clean(rxo); be_rx_cq_clean(rxo);
be_rxq_clean(rxo);
} }
be_queue_free(adapter, q); be_queue_free(adapter, q);
} }
} }
static void be_disable_if_filters(struct be_adapter *adapter)
{
be_cmd_pmac_del(adapter, adapter->if_handle,
adapter->pmac_id[0], 0);
be_clear_uc_list(adapter);
/* The IFACE flags are enabled in the open path and cleared
* in the close path. When a VF gets detached from the host and
* assigned to a VM the following happens:
* - VF's IFACE flags get cleared in the detach path
* - IFACE create is issued by the VF in the attach path
* Due to a bug in the BE3/Skyhawk-R FW
* (Lancer FW doesn't have the bug), the IFACE capability flags
* specified along with the IFACE create cmd issued by a VF are not
* honoured by FW. As a consequence, if a *new* driver
* (that enables/disables IFACE flags in open/close)
* is loaded in the host and an *old* driver is * used by a VM/VF,
* the IFACE gets created *without* the needed flags.
* To avoid this, disable RX-filter flags only for Lancer.
*/
if (lancer_chip(adapter)) {
be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
}
}
static int be_close(struct net_device *netdev) static int be_close(struct net_device *netdev)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0; return 0;
be_disable_if_filters(adapter);
be_roce_dev_close(adapter); be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
be_tx_compl_clean(adapter); be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter); be_rx_qs_destroy(adapter);
be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) { for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter)) if (msix_enabled(adapter))
@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
return 0; return 0;
} }
static int be_enable_if_filters(struct be_adapter *adapter)
{
int status;
status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
if (status)
return status;
/* For BE3 VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
adapter->if_handle,
&adapter->pmac_id[0], 0);
if (status)
return status;
}
if (adapter->vlans_added)
be_vid_config(adapter);
be_set_rx_mode(adapter->netdev);
return 0;
}
static int be_open(struct net_device *netdev) static int be_open(struct net_device *netdev)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
if (status) if (status)
goto err; goto err;
status = be_enable_if_filters(adapter);
if (status)
goto err;
status = be_irq_register(adapter); status = be_irq_register(adapter);
if (status) if (status)
goto err; goto err;
@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
} }
} }
static void be_mac_clear(struct be_adapter *adapter)
{
if (adapter->pmac_id) {
be_cmd_pmac_del(adapter, adapter->if_handle,
adapter->pmac_id[0], 0);
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
}
}
#ifdef CONFIG_BE2NET_VXLAN #ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter) static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{ {
@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
#ifdef CONFIG_BE2NET_VXLAN #ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter); be_disable_vxlan_offloads(adapter);
#endif #endif
/* delete the primary mac along with the uc-mac list */ kfree(adapter->pmac_id);
be_mac_clear(adapter); adapter->pmac_id = NULL;
be_cmd_if_destroy(adapter, adapter->if_handle, 0); be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
return 0; return 0;
} }
static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
u32 cap_flags, u32 vf)
{
u32 en_flags;
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
en_flags &= cap_flags;
return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
}
static int be_vfs_if_create(struct be_adapter *adapter) static int be_vfs_if_create(struct be_adapter *adapter)
{ {
struct be_resources res = {0}; struct be_resources res = {0};
u32 cap_flags, en_flags, vf;
struct be_vf_cfg *vf_cfg; struct be_vf_cfg *vf_cfg;
u32 cap_flags, vf;
int status; int status;
/* If a FW profile exists, then cap_flags are updated */ /* If a FW profile exists, then cap_flags are updated */
@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
} }
} }
status = be_if_create(adapter, &vf_cfg->if_handle, en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
cap_flags, vf + 1); BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST |
BE_IF_FLAGS_PASS_L3L4_ERRORS);
status = be_cmd_if_create(adapter, cap_flags, en_flags,
&vf_cfg->if_handle, vf + 1);
if (status) if (status)
return status; return status;
} }
@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
} else {
/* Maybe the HW was reset; dev_addr must be re-programmed */
memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
} }
/* For BE3-R VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter)))
be_cmd_pmac_add(adapter, mac, adapter->if_handle,
&adapter->pmac_id[0], 0);
return 0; return 0;
} }
@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter) static int be_setup(struct be_adapter *adapter)
{ {
struct device *dev = &adapter->pdev->dev; struct device *dev = &adapter->pdev->dev;
u32 en_flags;
int status; int status;
status = be_func_init(adapter); status = be_func_init(adapter);
@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
if (status) if (status)
goto err; goto err;
status = be_if_create(adapter, &adapter->if_handle, /* will enable all the needed filter flags in be_open() */
be_if_cap_flags(adapter), 0); en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
en_flags = en_flags & be_if_cap_flags(adapter);
status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
&adapter->if_handle, 0);
if (status) if (status)
goto err; goto err;
@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
} }
if (adapter->vlans_added)
be_vid_config(adapter);
be_set_rx_mode(adapter->netdev);
status = be_cmd_set_flow_control(adapter, adapter->tx_fc, status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc); adapter->rx_fc);
if (status) if (status)

View File

@ -3433,6 +3433,7 @@ fec_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev); pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev); pm_runtime_enable(&pdev->dev);

View File

@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag = skb_shinfo(skb)->frags; frag = skb_shinfo(skb)->frags;
while (nr_frags) { while (nr_frags) {
CBDC_SC(bdp, CBDC_SC(bdp,
BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY); CBDS_SC(bdp, BD_ENET_TX_READY);
if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)

View File

@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
} }
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) #define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
#define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \

View File

@ -900,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
return 0; return 0;
} }
static int gfar_comp_asc(const void *a, const void *b)
{
return memcmp(a, b, 4);
}
static int gfar_comp_desc(const void *a, const void *b)
{
return -memcmp(a, b, 4);
}
static void gfar_swap(void *a, void *b, int size)
{
u32 *_a = a;
u32 *_b = b;
swap(_a[0], _b[0]);
swap(_a[1], _b[1]);
swap(_a[2], _b[2]);
swap(_a[3], _b[3]);
}
/* Write a mask to filer cache */ /* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab) static void gfar_set_mask(u32 mask, struct filer_table *tab)
{ {
@ -1270,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
return 0; return 0;
} }
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
struct gfar_filer_entry src[0], s32 size)
{
while (size > 0) {
size--;
dst[size].ctrl = src[size].ctrl;
dst[size].prop = src[size].prop;
}
}
/* Delete the contents of the filer-table between start and end
* and collapse them
*/
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
int length;
if (end > MAX_FILER_CACHE_IDX || end < begin)
return -EINVAL;
end++;
length = end - begin;
/* Copy */
while (end < tab->index) {
tab->fe[begin].ctrl = tab->fe[end].ctrl;
tab->fe[begin++].prop = tab->fe[end++].prop;
}
/* Fill up with don't cares */
while (begin < tab->index) {
tab->fe[begin].ctrl = 0x60;
tab->fe[begin].prop = 0xFFFFFFFF;
begin++;
}
tab->index -= length;
return 0;
}
/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
struct filer_table *tab)
{
if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
begin > MAX_FILER_CACHE_IDX)
return -EINVAL;
gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
tab->index - length + 1);
tab->index += length;
return 0;
}
static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
start++) {
if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
(RQFCR_AND | RQFCR_CLE))
return start;
}
return -1;
}
static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
start++) {
if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
(RQFCR_CLE))
return start;
}
return -1;
}
/* Uses hardwares clustering option to reduce
* the number of filer table entries
*/
static void gfar_cluster_filer(struct filer_table *tab)
{
s32 i = -1, j, iend, jend;
while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
j = i;
while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
/* The cluster entries self and the previous one
* (a mask) must be identical!
*/
if (tab->fe[i].ctrl != tab->fe[j].ctrl)
break;
if (tab->fe[i].prop != tab->fe[j].prop)
break;
if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
break;
if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
break;
iend = gfar_get_next_cluster_end(i, tab);
jend = gfar_get_next_cluster_end(j, tab);
if (jend == -1 || iend == -1)
break;
/* First we make some free space, where our cluster
* element should be. Then we copy it there and finally
* delete in from its old location.
*/
if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
-EINVAL)
break;
gfar_copy_filer_entries(&(tab->fe[iend + 1]),
&(tab->fe[jend + 1]), jend - j);
if (gfar_trim_filer_entries(jend - 1,
jend + (jend - j),
tab) == -EINVAL)
return;
/* Mask out cluster bit */
tab->fe[iend].ctrl &= ~(RQFCR_CLE);
}
}
}
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
struct gfar_filer_entry *a2,
struct gfar_filer_entry *b1,
struct gfar_filer_entry *b2, u32 mask)
{
u32 temp[4];
temp[0] = a1->ctrl & mask;
temp[1] = a2->ctrl & mask;
temp[2] = b1->ctrl & mask;
temp[3] = b2->ctrl & mask;
a1->ctrl &= ~mask;
a2->ctrl &= ~mask;
b1->ctrl &= ~mask;
b2->ctrl &= ~mask;
a1->ctrl |= temp[1];
a2->ctrl |= temp[0];
b1->ctrl |= temp[3];
b2->ctrl |= temp[2];
}
/* Generate a list consisting of masks values with their start and
* end of validity and block as indicator for parts belonging
* together (glued by ANDs) in mask_table
*/
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
struct filer_table *tab)
{
u32 i, and_index = 0, block_index = 1;
for (i = 0; i < tab->index; i++) {
/* LSByte of control = 0 sets a mask */
if (!(tab->fe[i].ctrl & 0xF)) {
mask_table[and_index].mask = tab->fe[i].prop;
mask_table[and_index].start = i;
mask_table[and_index].block = block_index;
if (and_index >= 1)
mask_table[and_index - 1].end = i - 1;
and_index++;
}
/* cluster starts and ends will be separated because they should
* hold their position
*/
if (tab->fe[i].ctrl & RQFCR_CLE)
block_index++;
/* A not set AND indicates the end of a depended block */
if (!(tab->fe[i].ctrl & RQFCR_AND))
block_index++;
}
mask_table[and_index - 1].end = i - 1;
return and_index;
}
/* Sorts the entries of mask_table by the values of the masks.
* Important: The 0xFF80 flags of the first and last entry of a
* block must hold their position (which queue, CLusterEnable, ReJEct,
* AND)
*/
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
struct filer_table *temp_table, u32 and_index)
{
/* Pointer to compare function (_asc or _desc) */
int (*gfar_comp)(const void *, const void *);
u32 i, size = 0, start = 0, prev = 1;
u32 old_first, old_last, new_first, new_last;
gfar_comp = &gfar_comp_desc;
for (i = 0; i < and_index; i++) {
if (prev != mask_table[i].block) {
old_first = mask_table[start].start + 1;
old_last = mask_table[i - 1].end;
sort(mask_table + start, size,
sizeof(struct gfar_mask_entry),
gfar_comp, &gfar_swap);
/* Toggle order for every block. This makes the
* thing more efficient!
*/
if (gfar_comp == gfar_comp_desc)
gfar_comp = &gfar_comp_asc;
else
gfar_comp = &gfar_comp_desc;
new_first = mask_table[start].start + 1;
new_last = mask_table[i - 1].end;
gfar_swap_bits(&temp_table->fe[new_first],
&temp_table->fe[old_first],
&temp_table->fe[new_last],
&temp_table->fe[old_last],
RQFCR_QUEUE | RQFCR_CLE |
RQFCR_RJE | RQFCR_AND);
start = i;
size = 0;
}
size++;
prev = mask_table[i].block;
}
}
/* Reduces the number of masks needed in the filer table to save entries
* This is done by sorting the masks of a depended block. A depended block is
* identified by gluing ANDs or CLE. The sorting order toggles after every
* block. Of course entries in scope of a mask must change their location with
* it.
*/
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
struct filer_table *temp_table;
struct gfar_mask_entry *mask_table;
u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
s32 ret = 0;
/* We need a copy of the filer table because
* we want to change its order
*/
temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
sizeof(struct gfar_mask_entry), GFP_KERNEL);
if (mask_table == NULL) {
ret = -ENOMEM;
goto end;
}
and_index = gfar_generate_mask_table(mask_table, tab);
gfar_sort_mask_table(mask_table, temp_table, and_index);
/* Now we can copy the data from our duplicated filer table to
* the real one in the order the mask table says
*/
for (i = 0; i < and_index; i++) {
size = mask_table[i].end - mask_table[i].start + 1;
gfar_copy_filer_entries(&(tab->fe[j]),
&(temp_table->fe[mask_table[i].start]), size);
j += size;
}
/* And finally we just have to check for duplicated masks and drop the
* second ones
*/
for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
if (tab->fe[i].ctrl == 0x80) {
previous_mask = i++;
break;
}
}
for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
if (tab->fe[i].ctrl == 0x80) {
if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
/* Two identical ones found!
* So drop the second one!
*/
gfar_trim_filer_entries(i, i, tab);
} else
/* Not identical! */
previous_mask = i;
}
}
kfree(mask_table);
end: kfree(temp_table);
return ret;
}
/* Write the bit-pattern from software's buffer to hardware registers */ /* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv, static int gfar_write_filer_table(struct gfar_private *priv,
struct filer_table *tab) struct filer_table *tab)
@ -1583,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY; return -EBUSY;
/* Fill regular entries */ /* Fill regular entries */
for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-troughs */ /* Fill the rest with fall-troughs */
for (; i < MAX_FILER_IDX - 1; i++) for (; i < MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept /* Last entry must be default accept
* because that's what people expect * because that's what people expect
@ -1621,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
{ {
struct ethtool_flow_spec_container *j; struct ethtool_flow_spec_container *j;
struct filer_table *tab; struct filer_table *tab;
s32 i = 0;
s32 ret = 0; s32 ret = 0;
/* So index is set to zero, too! */ /* So index is set to zero, too! */
@ -1646,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
} }
} }
i = tab->index;
/* Optimizations to save entries */
gfar_cluster_filer(tab);
gfar_optimize_filer_masks(tab);
pr_debug("\tSummary:\n"
"\tData on hardware: %d\n"
"\tCompression rate: %d%%\n",
tab->index, 100 - (100 * tab->index) / i);
/* Write everything to hardware */ /* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab); ret = gfar_write_filer_table(priv, tab);
if (ret == -EBUSY) { if (ret == -EBUSY) {
@ -1722,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
} }
process: process:
priv->rx_list.count++;
ret = gfar_process_filer_changes(priv); ret = gfar_process_filer_changes(priv);
if (ret) if (ret)
goto clean_list; goto clean_list;
priv->rx_list.count++;
return ret; return ret;
clean_list: clean_list:
priv->rx_list.count--;
list_del(&temp->list); list_del(&temp->list);
clean_mem: clean_mem:
kfree(temp); kfree(temp);

View File

@ -27,6 +27,8 @@
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/phy.h> #include <linux/phy.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h> #include <uapi/linux/ppp_defs.h>
#include <net/ip.h> #include <net/ip.h>
#include <net/ipv6.h> #include <net/ipv6.h>
@ -299,6 +301,7 @@
/* Coalescing */ /* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15 #define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100 #define MVPP2_RX_COAL_USEC 100
@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
u64 tx_bytes; u64 tx_bytes;
}; };
/* Per-CPU port control */
struct mvpp2_port_pcpu {
struct hrtimer tx_done_timer;
bool timer_scheduled;
/* Tasklet for egress finalization */
struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port { struct mvpp2_port {
u8 id; u8 id;
@ -679,6 +690,9 @@ struct mvpp2_port {
u32 pending_cause_rx; u32 pending_cause_rx;
struct napi_struct napi; struct napi_struct napi;
/* Per-CPU port control */
struct mvpp2_port_pcpu __percpu *pcpu;
/* Flags */ /* Flags */
unsigned long flags; unsigned long flags;
@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
/* Array of transmitted skb */ /* Array of transmitted skb */
struct sk_buff **tx_skb; struct sk_buff **tx_skb;
/* Array of transmitted buffers' physical addresses */
dma_addr_t *tx_buffs;
/* Index of last TX DMA descriptor that was inserted */ /* Index of last TX DMA descriptor that was inserted */
int txq_put_index; int txq_put_index;
@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
/* Occupied buffers indicator */ /* Occupied buffers indicator */
atomic_t in_use; atomic_t in_use;
int in_use_thresh; int in_use_thresh;
spinlock_t lock;
}; };
struct mvpp2_buff_hdr { struct mvpp2_buff_hdr {
@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
} }
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
struct sk_buff *skb) struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc)
{ {
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
if (skb)
txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++; txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size) if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0; txq_pcpu->txq_put_index = 0;
@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->pkt_size = 0; bm_pool->pkt_size = 0;
bm_pool->buf_num = 0; bm_pool->buf_num = 0;
atomic_set(&bm_pool->in_use, 0); atomic_set(&bm_pool->in_use, 0);
spin_lock_init(&bm_pool->lock);
return 0; return 0;
} }
@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
int pkt_size) int pkt_size)
{ {
unsigned long flags = 0;
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num; int num;
@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
return NULL; return NULL;
} }
spin_lock_irqsave(&new_pool->lock, flags);
if (new_pool->type == MVPP2_BM_FREE) if (new_pool->type == MVPP2_BM_FREE)
new_pool->type = type; new_pool->type = type;
@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
if (num != pkts_num) { if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n", WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num); new_pool->id, num, pkts_num);
/* We need to undo the bufs_add() allocations */
spin_unlock_irqrestore(&new_pool->lock, flags);
return NULL; return NULL;
} }
} }
@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
mvpp2_bm_pool_bufsize_set(port->priv, new_pool, mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
spin_unlock_irqrestore(&new_pool->lock, flags);
return new_pool; return new_pool;
} }
/* Initialize pools for swf */ /* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{ {
unsigned long flags = 0;
int rxq; int rxq;
if (!port->pool_long) { if (!port->pool_long) {
@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_long) if (!port->pool_long)
return -ENOMEM; return -ENOMEM;
spin_lock_irqsave(&port->pool_long->lock, flags);
port->pool_long->port_map |= (1 << port->id); port->pool_long->port_map |= (1 << port->id);
spin_unlock_irqrestore(&port->pool_long->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++) for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_short) if (!port->pool_short)
return -ENOMEM; return -ENOMEM;
spin_lock_irqsave(&port->pool_short->lock, flags);
port->pool_short->port_map |= (1 << port->id); port->pool_short->port_map |= (1 << port->id);
spin_unlock_irqrestore(&port->pool_short->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++) for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_short_pool_set(port, rxq, mvpp2_rxq_short_pool_set(port, rxq,
@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
(MVPP2_CAUSE_MISC_SUM_MASK | (MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
} }
@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
rxq->time_coal = usec; rxq->time_coal = usec;
} }
/* Set threshold for TX_DONE pkts coalescing */
static void mvpp2_tx_done_pkts_coal_set(void *arg)
{
struct mvpp2_port *port = arg;
int queue;
u32 val;
for (queue = 0; queue < txq_number; queue++) {
struct mvpp2_tx_queue *txq = port->txqs[queue];
val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
MVPP2_TRANSMITTED_THRESH_MASK;
mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
}
}
/* Free Tx queue skbuffs */ /* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port, static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq, struct mvpp2_tx_queue *txq,
@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i; int i;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
struct mvpp2_tx_desc *tx_desc = txq->descs + dma_addr_t buf_phys_addr =
txq_pcpu->txq_get_index; txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
mvpp2_txq_inc_get(txq_pcpu); mvpp2_txq_inc_get(txq_pcpu);
@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
if (!skb) if (!skb)
continue; continue;
dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
tx_desc->data_size, DMA_TO_DEVICE); skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
} }
@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
u32 cause) u32 cause)
{ {
int queue = fls(cause >> 16) - 1; int queue = fls(cause) - 1;
return port->txqs[queue]; return port->txqs[queue];
} }
@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
netif_tx_wake_queue(nq); netif_tx_wake_queue(nq);
} }
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
struct mvpp2_tx_queue *txq;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int tx_todo = 0;
while (cause) {
txq = mvpp2_get_tx_queue(port, cause);
if (!txq)
break;
txq_pcpu = this_cpu_ptr(txq->pcpu);
if (txq_pcpu->count) {
mvpp2_txq_done(port, txq, txq_pcpu);
tx_todo += txq_pcpu->count;
}
cause &= ~(1 << txq->log_id);
}
return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */ /* Rx/Tx queue initialization/cleanup methods */
/* Allocate and initialize descriptors for aggr TXQ */ /* Allocate and initialize descriptors for aggr TXQ */
@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
sizeof(*txq_pcpu->tx_skb), sizeof(*txq_pcpu->tx_skb),
GFP_KERNEL); GFP_KERNEL);
if (!txq_pcpu->tx_skb) { if (!txq_pcpu->tx_skb)
dma_free_coherent(port->dev->dev.parent, goto error;
txq->size * MVPP2_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys); txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
return -ENOMEM; sizeof(dma_addr_t), GFP_KERNEL);
} if (!txq_pcpu->tx_buffs)
goto error;
txq_pcpu->count = 0; txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0; txq_pcpu->reserved_num = 0;
@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
} }
return 0; return 0;
error:
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb);
kfree(txq_pcpu->tx_buffs);
}
dma_free_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
return -ENOMEM;
} }
/* Free allocated TXQ resources */ /* Free allocated TXQ resources */
@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) { for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb); kfree(txq_pcpu->tx_skb);
kfree(txq_pcpu->tx_buffs);
} }
if (txq->descs) if (txq->descs)
@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
goto err_cleanup; goto err_cleanup;
} }
on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
return 0; return 0;
@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
} }
} }
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
ktime_t interval;
if (!port_pcpu->timer_scheduled) {
port_pcpu->timer_scheduled = true;
interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
hrtimer_start(&port_pcpu->tx_done_timer, interval,
HRTIMER_MODE_REL_PINNED);
}
}
static void mvpp2_tx_proc_cb(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
unsigned int tx_todo, cause;
if (!netif_running(dev))
return;
port_pcpu->timer_scheduled = false;
/* Process all the Tx queues */
cause = (1 << txq_number) - 1;
tx_todo = mvpp2_tx_done(port, cause);
/* Set the timer in case not all the packets were processed */
if (tx_todo)
mvpp2_timer_set(port_pcpu);
}
static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
struct mvpp2_port_pcpu,
tx_done_timer);
tasklet_schedule(&port_pcpu->tx_done_tasklet);
return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */ /* Main RX/TX processing routines */
/* Display more error info */ /* Display more error info */
@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
if (i == (skb_shinfo(skb)->nr_frags - 1)) { if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */ /* Last descriptor */
tx_desc->command = MVPP2_TXD_L_DESC; tx_desc->command = MVPP2_TXD_L_DESC;
mvpp2_txq_inc_put(txq_pcpu, skb); mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else { } else {
/* Descriptor in the middle: Not First, Not Last */ /* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0; tx_desc->command = 0;
mvpp2_txq_inc_put(txq_pcpu, NULL); mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
} }
} }
@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* First and Last descriptor */ /* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
tx_desc->command = tx_cmd; tx_desc->command = tx_cmd;
mvpp2_txq_inc_put(txq_pcpu, skb); mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else { } else {
/* First but not Last */ /* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
tx_desc->command = tx_cmd; tx_desc->command = tx_cmd;
mvpp2_txq_inc_put(txq_pcpu, NULL); mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */ /* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@ -5255,6 +5323,17 @@ out:
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
/* Finalize TX processing */
if (txq_pcpu->count >= txq->done_pkts_coal)
mvpp2_txq_done(port, txq, txq_pcpu);
/* Set the timer in case not all frags were processed */
if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
mvpp2_timer_set(port_pcpu);
}
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
netdev_err(dev, "tx fifo underrun error\n"); netdev_err(dev, "tx fifo underrun error\n");
} }
static void mvpp2_txq_done_percpu(void *arg) static int mvpp2_poll(struct napi_struct *napi, int budget)
{ {
struct mvpp2_port *port = arg; u32 cause_rx_tx, cause_rx, cause_misc;
u32 cause_rx_tx, cause_tx, cause_misc; int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
/* Rx/Tx cause register /* Rx/Tx cause register
* *
@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
*/ */
cause_rx_tx = mvpp2_read(port->priv, cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) { if (cause_misc) {
@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
} }
/* Release TX descriptors */
if (cause_tx) {
struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
if (txq_pcpu->count)
mvpp2_txq_done(port, txq, txq_pcpu);
}
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
u32 cause_rx_tx, cause_rx;
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
on_each_cpu(mvpp2_txq_done_percpu, port, 1);
cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
/* Process RX packets */ /* Process RX packets */
@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
static int mvpp2_stop(struct net_device *dev) static int mvpp2_stop(struct net_device *dev)
{ {
struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
int cpu;
mvpp2_stop_dev(port); mvpp2_stop_dev(port);
mvpp2_phy_disconnect(port); mvpp2_phy_disconnect(port);
@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
on_each_cpu(mvpp2_interrupts_mask, port, 1); on_each_cpu(mvpp2_interrupts_mask, port, 1);
free_irq(port->irq, port); free_irq(port->irq, port);
for_each_present_cpu(cpu) {
port_pcpu = per_cpu_ptr(port->pcpu, cpu);
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
tasklet_kill(&port_pcpu->tx_done_tasklet);
}
mvpp2_cleanup_rxqs(port); mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port); mvpp2_cleanup_txqs(port);
@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
txq->done_pkts_coal = c->tx_max_coalesced_frames; txq->done_pkts_coal = c->tx_max_coalesced_frames;
} }
on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
return 0; return 0;
} }
@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
{ {
struct device_node *phy_node; struct device_node *phy_node;
struct mvpp2_port *port; struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
struct net_device *dev; struct net_device *dev;
struct resource *res; struct resource *res;
const char *dt_mac_addr; const char *dt_mac_addr;
@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int features; int features;
int phy_mode; int phy_mode;
int priv_common_regs_num = 2; int priv_common_regs_num = 2;
int err, i; int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
rxq_number); rxq_number);
@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
} }
mvpp2_port_power_up(port); mvpp2_port_power_up(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
if (!port->pcpu) {
err = -ENOMEM;
goto err_free_txq_pcpu;
}
for_each_present_cpu(cpu) {
port_pcpu = per_cpu_ptr(port->pcpu, cpu);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
port_pcpu->timer_scheduled = false;
tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
(unsigned long)dev);
}
netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
features = NETIF_F_SG | NETIF_F_IP_CSUM; features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features = features | NETIF_F_RXCSUM; dev->features = features | NETIF_F_RXCSUM;
@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
err = register_netdev(dev); err = register_netdev(dev);
if (err < 0) { if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n"); dev_err(&pdev->dev, "failed to register netdev\n");
goto err_free_txq_pcpu; goto err_free_port_pcpu;
} }
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[id] = port; priv->port_list[id] = port;
return 0; return 0;
err_free_port_pcpu:
free_percpu(port->pcpu);
err_free_txq_pcpu: err_free_txq_pcpu:
for (i = 0; i < txq_number; i++) for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu); free_percpu(port->txqs[i]->pcpu);
@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i; int i;
unregister_netdev(port->dev); unregister_netdev(port->dev);
free_percpu(port->pcpu);
free_percpu(port->stats); free_percpu(port->stats);
for (i = 0; i < txq_number; i++) for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu); free_percpu(port->txqs[i]->pcpu);

View File

@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable cmdif checksum */ /* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
err = set_caps(dev, set_ctx, set_sz); err = set_caps(dev, set_ctx, set_sz);
query_ex: query_ex:
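
The added MLX5_SET() line is what item 9 is about: the device takes the UAR
page size as a log2 value in units of 4 KiB pages, so it has to follow the
kernel's PAGE_SHIFT instead of implicitly assuming 4 KiB pages.  Worked out
(my reading of the hunk, not text from the commit):

    PAGE_SIZE =  4 KiB  ->  PAGE_SHIFT = 12  ->  log_uar_page_sz = 12 - 12 = 0
    PAGE_SIZE = 64 KiB  ->  PAGE_SHIFT = 16  ->  log_uar_page_sz = 16 - 12 = 4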

View File

@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_48:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51: case RTL_GIGA_MAC_VER_51:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break; break;
default: default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);

View File

@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE); ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev); unregister_netdev(rocker_port->dev);
free_netdev(rocker_port->dev);
} }
kfree(rocker->ports); kfree(rocker->ports);
} }

View File

@ -42,7 +42,7 @@
#define NSS_COMMON_CLK_DIV_MASK 0x7f #define NSS_COMMON_CLK_DIV_MASK 0x7f
#define NSS_COMMON_CLK_SRC_CTRL 0x14 #define NSS_COMMON_CLK_SRC_CTRL 0x14
#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) #define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
/* Mode is coded on 1 bit but is different depending on the MAC ID: /* Mode is coded on 1 bit but is different depending on the MAC ID:
* MAC0: QSGMII=0 RGMII=1 * MAC0: QSGMII=0 RGMII=1
* MAC1: QSGMII=0 SGMII=0 RGMII=1 * MAC1: QSGMII=0 SGMII=0 RGMII=1
@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
/* Configure the clock src according to the mode */ /* Configure the clock src according to the mode */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) { switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
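
Reading this hunk: NSS_COMMON_CLK_SRC_CTRL_OFFSET() is used both as a bit
mask (in the clear above) and as a shift count (in the "val |= ... <<"
statement that follows).  With the old (1 << x) definition those two uses
disagree: for gmac->id == 1, for example, the macro expands to 2, so the
mode bit gets shifted by two positions instead of one.  Defining the macro
as the plain bit position and doing the 1 << shift explicitly where a mask
is needed keeps both uses consistent.  (This is my reading of the visible
diff, not wording taken from the commit.)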

View File

@ -85,7 +85,6 @@ struct netcp_intf {
struct list_head rxhook_list_head; struct list_head rxhook_list_head;
unsigned int rx_queue_id; unsigned int rx_queue_id;
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi; struct napi_struct rx_napi;
struct napi_struct tx_napi; struct napi_struct tx_napi;

View File

@@ -34,6 +34,7 @@
 #define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
 #define NETCP_NAPI_WEIGHT	64
 #define NETCP_TX_TIMEOUT	(5 * HZ)
+#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
 #define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
 #define NETCP_MAX_MCAST_ADDR	16
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 	if (likely(fdq == 0)) {
 		unsigned int primary_buf_len;
 		/* Allocate a primary receive queue entry */
-		buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
 		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
 				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-		if (primary_buf_len <= PAGE_SIZE) {
-			bufptr = netdev_alloc_frag(primary_buf_len);
-			pad[1] = primary_buf_len;
-		} else {
-			bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
-					 GFP_DMA32 | __GFP_COLD);
-			pad[1] = 0;
-		}
+		bufptr = netdev_alloc_frag(primary_buf_len);
+		pad[1] = primary_buf_len;
 		if (unlikely(!bufptr)) {
-			dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+			dev_warn_ratelimited(netcp->ndev_dev,
+					     "Primary RX buffer alloc failed\n");
 			goto fail;
 		}
 		dma = dma_map_single(netcp->dev, bufptr, buf_len,
 				     DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(netcp->dev, dma)))
+			goto fail;
 		pad[0] = (u32)bufptr;
 	} else {
 		/* Allocate a secondary receive queue entry */
-		page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
 		if (unlikely(!page)) {
 			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
 			goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
 	/* Map the linear buffer */
 	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
-	if (unlikely(!dma_addr)) {
+	if (unlikely(dma_mapping_error(dev, dma_addr))) {
 		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
 		return NULL;
 	}
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
 	knav_queue_disable_notify(netcp->rx_queue);
 	/* open Rx FDQs */
-	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
-	     netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+	     ++i) {
 		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
 		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
 		if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
 		netcp->rx_queue_depths[0] = 128;
 	}
-	ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
-					 netcp->rx_buffer_sizes,
-					 KNAV_DMA_FDQ_PER_CHAN);
-	if (ret) {
-		dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
-		netcp->rx_buffer_sizes[0] = 1536;
-	}
 	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
 	if (ret < 0) {
 		dev_err(dev, "missing \"rx-pool\" parameter\n");


@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
 	dev->type = ARPHRD_AX25;
 	/* Perform the low-level AX25 initialization. */
-	if ((err = ax_open(ax->dev))) {
+	err = ax_open(ax->dev);
+	if (err)
 		goto out_free_netdev;
-	}
-	if (register_netdev(dev))
+	err = register_netdev(dev);
+	if (err)
 		goto out_free_buffers;
 	/* after register_netdev() - because else printk smashes the kernel */


@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 	/* Do we support "hardware" checksums? */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
 		/* This opens up the world of extra features. */
-		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 		if (csum)
-			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO


@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
 		chan->netdev->base_addr = chan->cosa->datareg;
 		chan->netdev->irq = chan->cosa->irq;
 		chan->netdev->dma = chan->cosa->dma;
-		if (register_hdlc_device(chan->netdev)) {
+		err = register_hdlc_device(chan->netdev);
+		if (err) {
 			netdev_warn(chan->netdev,
 				    "register_hdlc_device() failed\n");
 			free_netdev(chan->netdev);


@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
 		switch (phy->rev) {
 		case 6:
 		case 5:
-			if (sprom->fem.ghz5.extpa_gain == 3)
+			if (sprom->fem.ghz2.extpa_gain == 3)
 				return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
 			/* fall through */
 		case 4:


@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 	cmd->scan_priority =
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
-	if (iwl_mvm_scan_total_iterations(params) == 0)
+	if (iwl_mvm_scan_total_iterations(params) == 1)
 		cmd->ooc_priority =
 			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 	else


@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
 		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
 			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
 					  APMG_PCIDEV_STT_VAL_WAKE_ME);
-		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
 			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 				    CSR_HW_IF_CONFIG_REG_PREPARE |
 				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+			mdelay(1);
+			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		}
 		mdelay(5);
 	}
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	if (ret >= 0)
 		return 0;
+	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	msleep(1);
 	for (iter = 0; iter < 10; iter++) {
 		/* If HW is not ready, prepare the conditions to check again */
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 		do {
 			ret = iwl_pcie_set_hw_ready(trans);
-			if (ret >= 0)
-				return 0;
+			if (ret >= 0) {
+				ret = 0;
+				goto out;
+			}
 			usleep_range(200, 1000);
 			t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	IWL_ERR(trans, "Couldn't prepare the card\n");
+out:
+	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
 	return ret;
 }


@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		/* start timer if queue currently empty */
 		if (q->read_ptr == q->write_ptr) {
-			if (txq->wd_timeout)
-				mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+			if (txq->wd_timeout) {
+				/*
+				 * If the TXQ is active, then set the timer, if not,
+				 * set the timer in remainder so that the timer will
+				 * be armed with the right value when the station will
+				 * wake up.
+				 */
+				if (!txq->frozen)
+					mod_timer(&txq->stuck_timer,
+						  jiffies + txq->wd_timeout);
+				else
+					txq->frozen_expiry_remainder = txq->wd_timeout;
+			}
 			IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
 			iwl_trans_pcie_ref(trans);
 		}


@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
 	u32 len;
 	u32 num_blocks;
+	const u8 *fw;
 	const struct firmware *fw_entry = NULL;
 	u32 block_size = dev->tx_blk_size;
 	int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		return status;
 	}
+	/* Copy firmware into DMA-accessible memory */
+	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
 	len = fw_entry->size;
 	if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 	rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
-	status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
 	release_firmware(fw_entry);
 	return status;
 }


@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 		return status;
 	}
+	/* Copy firmware into DMA-accessible memory */
 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw)
+		return -ENOMEM;
 	len = fw_entry->size;
 	if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
 	release_firmware(fw_entry);
 	return status;
 }


@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+	struct rtl_tcb_desc tcb_desc;
-	if (skb)
-		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+	if (skb) {
+		memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+	}
 }
 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,


@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
 		   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");


@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 {
 	atomic_dec(&queue->inflight_packets);
+	/* Wake the dealloc thread _after_ decrementing inflight_packets so
+	 * that if kthread_stop() has already been called, the dealloc thread
+	 * does not wait forever with nothing to wake it.
+	 */
+	wake_up(&queue->dealloc_wq);
 }
 int xenvif_schedulable(struct xenvif *vif)


@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 							struct sk_buff *skb,
 							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop)
+							struct gnttab_map_grant_ref *gop,
+							unsigned int frag_overflow,
+							struct sk_buff *nskb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	int start;
 	pending_ring_idx_t index;
-	unsigned int nr_slots, frag_overflow = 0;
-	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-	 */
-	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-		shinfo->nr_frags = MAX_SKB_FRAGS;
-	}
+	unsigned int nr_slots;
 	nr_slots = shinfo->nr_frags;
 	/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 	}
 	if (frag_overflow) {
-		struct sk_buff *nskb = xenvif_alloc_skb(0);
-		if (unlikely(nskb == NULL)) {
-			if (net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Can't allocate the frag_list skb.\n");
-			return NULL;
-		}
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				 unsigned *copy_ops,
 				 unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-	struct sk_buff *skb;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+	struct sk_buff *skb, *nskb;
 	int ret;
+	unsigned int frag_overflow;
 	while (skb_queue_len(&queue->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
+		skb_shinfo(skb)->nr_frags = ret;
+		if (data_len < txreq.size)
+			skb_shinfo(skb)->nr_frags++;
+		/* At this point shinfo->nr_frags is in fact the number of
+		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+		 */
+		frag_overflow = 0;
+		nskb = NULL;
+		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+			nskb = xenvif_alloc_skb(0);
+			if (unlikely(nskb == NULL)) {
+				kfree_skb(skb);
+				xenvif_tx_err(queue, &txreq, idx);
+				if (net_ratelimit())
+					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
+				break;
+			}
+		}
 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 			struct xen_netif_extra_info *gso;
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
 				kfree_skb(skb);
+				kfree_skb(nskb);
 				break;
 			}
 		}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		(*copy_ops)++;
-		skb_shinfo(skb)->nr_frags = ret;
 		if (data_len < txreq.size) {
-			skb_shinfo(skb)->nr_frags++;
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
 			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		queue->pending_cons++;
-		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-		if (request_gop == NULL) {
-			kfree_skb(skb);
-			xenvif_tx_err(queue, &txreq, idx);
-			break;
-		}
-		gop = request_gop;
+		gop = xenvif_get_requests(queue, skb, txfrags, gop,
+					  frag_overflow, nskb);
 		__skb_queue_tail(&queue->tx_queue, skb);
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		smp_wmb();
 		queue->dealloc_prod++;
 	} while (ubuf);
-	wake_up(&queue->dealloc_wq);
 	spin_unlock_irqrestore(&queue->callback_lock, flags);
 	if (likely(zerocopy_success))


@@ -2884,11 +2884,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
  *
  * PHY drivers may accept clones of transmitted packets for
  * timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
  *
  * @skb: clone of the the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
  *
  */
 void skb_complete_tx_timestamp(struct sk_buff *skb,


@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the packet was snooped and consumed by DAT. False if the
+ * packet has to be delivered to the interface
  */
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 					 struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 	uint16_t type;
 	__be32 ip_src, ip_dst;
 	uint8_t *hw_src, *hw_dst;
-	bool ret = false;
+	bool dropped = false;
 	unsigned short vid;
 	if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 	/* if this REPLY is directed to a client of mine, let's deliver the
 	 * packet to the interface
 	 */
-	ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
+	dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+	/* if this REPLY is sent on behalf of a client of mine, let's drop the
+	 * packet because the client will reply by itself
+	 */
+	dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
 out:
-	if (ret)
+	if (dropped)
 		kfree_skb(skb);
-	/* if ret == false -> packet has to be delivered to the interface */
-	return ret;
+	/* if dropped == false -> deliver to the interface */
+	return dropped;
 }
 /**


@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
+	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
 	atomic_set(&gw_node->refcount, 1);
 	spin_lock_bh(&bat_priv->gw.list_lock);


@@ -479,6 +479,9 @@ out:
  */
 void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
+	if (!vlan)
+		return;
 	if (atomic_dec_and_test(&vlan->refcount)) {
 		spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
 		hlist_del_rcu(&vlan->list);


@@ -594,6 +594,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	/* increase the refcounter of the related vlan */
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
+		 addr, BATADV_PRINT_VID(vid)))
+		goto out;
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1037,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 	struct batadv_tt_local_entry *tt_local_entry;
 	uint16_t flags, curr_flags = BATADV_NO_FLAGS;
 	struct batadv_softif_vlan *vlan;
+	void *tt_entry_exists;
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
@@ -1061,11 +1065,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 	 * immediately purge it
 	 */
 	batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
-	hlist_del_rcu(&tt_local_entry->common.hash_entry);
+	tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+					     batadv_compare_tt,
+					     batadv_choose_tt,
+					     &tt_local_entry->common);
+	if (!tt_entry_exists)
+		goto out;
+	/* extra call to free the local tt entry */
 	batadv_tt_local_entry_free_ref(tt_local_entry);
 	/* decrease the reference held for this vlan */
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (!vlan)
+		goto out;
 	batadv_softif_vlan_free_ref(vlan);
 	batadv_softif_vlan_free_ref(vlan);
@@ -1166,8 +1181,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 			/* decrease the reference held for this vlan */
 			vlan = batadv_softif_vlan_get(bat_priv,
 						      tt_common_entry->vid);
-			batadv_softif_vlan_free_ref(vlan);
-			batadv_softif_vlan_free_ref(vlan);
+			if (vlan) {
+				batadv_softif_vlan_free_ref(vlan);
+				batadv_softif_vlan_free_ref(vlan);
+			}
 			batadv_tt_local_entry_free_ref(tt_local);
 		}
@@ -3207,8 +3224,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 		/* decrease the reference held for this vlan */
 		vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-		batadv_softif_vlan_free_ref(vlan);
-		batadv_softif_vlan_free_ref(vlan);
+		if (vlan) {
+			batadv_softif_vlan_free_ref(vlan);
+			batadv_softif_vlan_free_ref(vlan);
+		}
 		batadv_tt_local_entry_free_ref(tt_local);
 	}


@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
 	/* Make sure we copy only the significant bytes based on the
 	 * encryption key size, and set the rest of the value to zeroes.
 	 */
-	memcpy(ev.key.val, key->val, sizeof(key->enc_size));
+	memcpy(ev.key.val, key->val, key->enc_size);
 	memset(ev.key.val + key->enc_size, 0,
 	       sizeof(ev.key.val) - key->enc_size);
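The one-liner above is the classic sizeof-on-the-wrong-operand bug: sizeof(key->enc_size) is the size of the length field itself (one byte for a u8), not the key length stored in it. A tiny standalone C illustration of the difference (struct layout and values invented for the example):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct ltk {
	uint8_t enc_size;	/* how many bytes of val are significant */
	uint8_t val[16];
};

int main(void)
{
	struct ltk key = { .enc_size = 16 };
	uint8_t out[16] = { 0 };

	memset(key.val, 0xab, sizeof(key.val));

	memcpy(out, key.val, sizeof(key.enc_size));	/* copies 1 byte: the bug  */
	memcpy(out, key.val, key.enc_size);		/* copies 16 bytes: the fix */

	printf("sizeof(key.enc_size) = %zu, key.enc_size = %u\n",
	       sizeof(key.enc_size), key.enc_size);
	return 0;
}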


@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
 		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
 		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
 		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
+		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
+		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
 		+ 0;
 }
@@ -506,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
 	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
 	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
 	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
+	[IFLA_BRPORT_PROXYARP]		= { .type = NLA_U8 },
+	[IFLA_BRPORT_PROXYARP_WIFI]	= { .type = NLA_U8 },
 };
 /* Change the state of the port and notify spanning tree */


@@ -131,12 +131,12 @@ out_noerr:
 	goto out;
 }
-static int skb_set_peeked(struct sk_buff *skb)
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
 {
 	struct sk_buff *nskb;
 	if (skb->peeked)
-		return 0;
+		return skb;
 	/* We have to unshare an skb before modifying it. */
 	if (!skb_shared(skb))
@@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (!nskb)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	skb->prev->next = nskb;
 	skb->next->prev = nskb;
@@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 done:
 	skb->peeked = 1;
-	return 0;
+	return skb;
 }
 /**
@@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 					continue;
 				}
-				error = skb_set_peeked(skb);
-				if (error)
+				skb = skb_set_peeked(skb);
+				error = PTR_ERR(skb);
+				if (IS_ERR(skb))
 					goto unlock_err;
 				atomic_inc(&skb->users);
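For readers unfamiliar with the pattern the last hunk switches to: the kernel encodes small negative errno values inside the returned pointer (ERR_PTR/IS_ERR/PTR_ERR), so a function can hand back either a valid object or an error through one return value. A rough userspace re-implementation of the idea (simplified sketch, not the kernel's actual macros):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Treat the last 4095 values of the address space as "error pointers". */
#define MAX_ERRNO	4095

static inline void *err_ptr(long error)      { return (void *)error; }
static inline long ptr_err(const void *ptr)  { return (long)ptr; }
static inline bool is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *get_buffer(int fail)
{
	static char buf[64];

	if (fail)
		return err_ptr(-ENOMEM);	/* the error travels in the pointer */
	return buf;
}

int main(void)
{
	void *p = get_buffer(1);

	if (is_err(p))
		printf("failed: %ld\n", ptr_err(p));
	return 0;
}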


@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
 	set_freezable();
-	__set_current_state(TASK_RUNNING);
 	while (!kthread_should_stop()) {
 		pkt_dev = next_to_run(t);
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
 		try_to_freeze();
 	}
-	set_current_state(TASK_INTERRUPTIBLE);
 	pr_debug("%s stopping all device\n", t->tsk->comm);
 	pktgen_stop(t);


@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 		spin_lock_bh(&queue->syn_wait_lock);
 		while ((req = lopt->syn_table[i]) != NULL) {
 			lopt->syn_table[i] = req->dl_next;
+			/* Because of following del_timer_sync(),
+			 * we must release the spinlock here
+			 * or risk a dead lock.
+			 */
+			spin_unlock_bh(&queue->syn_wait_lock);
 			atomic_inc(&lopt->qlen_dec);
-			if (del_timer(&req->rsk_timer))
+			if (del_timer_sync(&req->rsk_timer))
 				reqsk_put(req);
 			reqsk_put(req);
+			spin_lock_bh(&queue->syn_wait_lock);
 		}
 		spin_unlock_bh(&queue->syn_wait_lock);
 	}


@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
 		return -ENODEV;
 	/* Use already configured phy mode */
-	p->phy_interface = p->phy->interface;
+	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+		p->phy_interface = p->phy->interface;
 	phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
 			   p->phy_interface);


@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 	}
 	spin_unlock(&queue->syn_wait_lock);
-	if (del_timer(&req->rsk_timer))
+	if (del_timer_sync(&req->rsk_timer))
 		reqsk_put(req);
 	return found;
 }


@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 	synproxy_build_options(nth, opts);
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+			  niph, nth, tcp_hdr_size);
 }
 static bool


@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
 	if (req) {
 		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk)
+		if (!nsk || nsk == sk)
 			reqsk_put(req);
 		return nsk;
 	}


@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
 	skb->sk = sk;
 	skb->destructor = sock_efree;
-	dst = sk->sk_rx_dst;
+	dst = READ_ONCE(sk->sk_rx_dst);
 	if (dst)
 		dst = dst_check(dst, 0);
-	if (dst)
-		skb_dst_set_noref(skb, dst);
+	if (dst) {
+		/* DST_NOCACHE can not be used without taking a reference */
+		if (dst->flags & DST_NOCACHE) {
+			if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+				skb_dst_set(skb, dst);
+		} else {
+			skb_dst_set_noref(skb, dst);
+		}
+	}
 }
 int udp_rcv(struct sk_buff *skb)
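The key idea in the hunk above is "take a reference only if the object is still live": atomic_inc_not_zero() never revives a refcount that has already dropped to zero, which is what makes it safe against a concurrent free. A rough userspace equivalent using C11 atomics (illustrative sketch only, not the kernel primitive):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *refs unless it is already zero (object being torn down). */
static bool refcount_inc_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;	/* reference successfully taken */
		/* old was reloaded by the failed CAS; loop and retry */
	}
	return false;			/* too late, do not touch the object */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d\n", refcount_inc_not_zero(&live));	/* 1 */
	printf("dying: %d\n", refcount_inc_not_zero(&dying));	/* 0 */
	return 0;
}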


@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
 }
 static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+		  const struct sk_buff *skb, struct sk_buff *nskb,
 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
 		  struct ipv6hdr *niph, struct tcphdr *nth,
 		  unsigned int tcp_hdr_size)
 {
-	struct net *net = nf_ct_net((struct nf_conn *)nfct);
+	struct net *net = nf_ct_net(snet->tmpl);
 	struct dst_entry *dst;
 	struct flowi6 fl6;
@@ -83,7 +84,8 @@ free_nskb:
 }
 static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+			    const struct sk_buff *skb, const struct tcphdr *th,
 			    const struct synproxy_options *opts)
 {
 	struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
 	synproxy_build_options(nth, opts);
-	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 	synproxy_build_options(nth, opts);
-	synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
 			  niph, nth, tcp_hdr_size);
 }
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 	synproxy_build_options(nth, opts);
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 	synproxy_build_options(nth, opts);
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+			  niph, nth, tcp_hdr_size);
 }
 static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 					  XT_SYNPROXY_OPT_SACK_PERM |
 					  XT_SYNPROXY_OPT_ECN);
-		synproxy_send_client_synack(skb, th, &opts);
+		synproxy_send_client_synack(snet, skb, th, &opts);
 		return NF_DROP;
 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {


@@ -1831,6 +1831,7 @@ int ip6_route_add(struct fib6_config *cfg)
 		int gwa_type;
 		gw_addr = &cfg->fc_gateway;
+		gwa_type = ipv6_addr_type(gw_addr);
 		/* if gw_addr is local we will fail to detect this in case
 		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1838,11 +1839,12 @@ int ip6_route_add(struct fib6_config *cfg)
 		 * prefix route was assigned to, which might be non-loopback.
 		 */
 		err = -EINVAL;
-		if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+		if (ipv6_chk_addr_and_flags(net, gw_addr,
+					    gwa_type & IPV6_ADDR_LINKLOCAL ?
+					    dev : NULL, 0, 0))
 			goto out;
 		rt->rt6i_gateway = *gw_addr;
-		gwa_type = ipv6_addr_type(gw_addr);
 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
 			struct rt6_info *grt;


@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
 	if (req) {
 		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk)
+		if (!nsk || nsk == sk)
 			reqsk_put(req);
 		return nsk;
 	}


@@ -292,7 +292,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 {
 	struct nf_conn *tmpl;
-	tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+	tmpl = kzalloc(sizeof(*tmpl), flags);
 	if (tmpl == NULL)
 		return NULL;
@@ -303,7 +303,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 	if (zone) {
 		struct nf_conntrack_zone *nf_ct_zone;
-		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
 		if (!nf_ct_zone)
 			goto out_free;
 		nf_ct_zone->id = zone;
@@ -1544,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 	sz = nr_slots * sizeof(struct hlist_nulls_head);
 	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
 					get_order(sz));
-	if (!hash) {
-		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+	if (!hash)
 		hash = vzalloc(sz);
-	}
 	if (hash && nulls)
 		for (i = 0; i < nr_slots; i++)


@@ -353,10 +353,8 @@ static int __net_init synproxy_net_init(struct net *net)
 	int err = -ENOMEM;
 	ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
-	if (IS_ERR(ct)) {
-		err = PTR_ERR(ct);
+	if (!ct)
 		goto err1;
-	}
 	if (!nfct_seqadj_ext_add(ct))
 		goto err2;


@@ -202,9 +202,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 		goto err1;
 	ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
-	ret = PTR_ERR(ct);
-	if (IS_ERR(ct))
+	if (!ct) {
+		ret = -ENOMEM;
 		goto err2;
+	}
 	ret = 0;
 	if ((info->ct_events || info->exp_events) &&


@@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
 	err = __netlink_insert(table, sk);
 	if (err) {
+		/* In case the hashtable backend returns with -EBUSY
+		 * from here, it must not escape to the caller.
+		 */
+		if (unlikely(err == -EBUSY))
+			err = -EOVERFLOW;
 		if (err == -EEXIST)
 			err = -EADDRINUSE;
 		nlk_sk(sk)->portid = 0;


@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	return 0;
 }
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
-			__be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+				  __be32 addr, __be32 new_addr)
 {
 	int transport_len = skb->len - skb_transport_offset(skb);
+	if (nh->frag_off & htons(IP_OFFSET))
+		return;
 	if (nh->protocol == IPPROTO_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-						 *addr, new_addr, 1);
+						 addr, new_addr, 1);
 	} else if (nh->protocol == IPPROTO_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace4(&uh->check, skb,
-							 *addr, new_addr, 1);
+							 addr, new_addr, 1);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
 		}
 	}
+}
+
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+			__be32 *addr, __be32 new_addr)
+{
+	update_ip_l4_checksum(skb, nh, *addr, new_addr);
 	csum_replace4(&nh->check, *addr, new_addr);
 	skb_clear_hash(skb);
 	*addr = new_addr;


@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 	/* check for all kinds of wrapping and the like */
 	start = (unsigned long)optval;
-	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
 		ret = -EINVAL;
 		goto out;
 	}
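The reworked length test above is interesting because it checks the bound before doing any arithmetic: the original tried to detect wraparound after computing len + PAGE_SIZE - 1, which is unreliable when the sum is evaluated in a wider type (and would be undefined behaviour for plain signed int), whereas comparing len against the largest safe value cannot overflow at all. A small standalone illustration of the same idea (4096 stands in for PAGE_SIZE; names are made up):

#include <stdio.h>
#include <limits.h>

#define PSIZE 4096L	/* stand-in for PAGE_SIZE */

/* Pre-check the bound instead of detecting wraparound after the fact. */
static int len_ok(int len)
{
	return len >= 0 && len <= INT_MAX - PSIZE + 1;
}

int main(void)
{
	printf("%d %d %d\n", len_ok(0), len_ok(1 << 20), len_ok(INT_MAX));
	/* prints: 1 1 0 */
	return 0;
}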


@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 			return ret;
 		ret = ACT_P_CREATED;
 	} else {
+		if (bind)
+			return 0;
 		if (!ovr) {
 			tcf_hash_release(a, bind);
 			return -EEXIST;


@@ -288,10 +288,26 @@ begin:
 static void fq_codel_reset(struct Qdisc *sch)
 {
-	struct sk_buff *skb;
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	int i;
-	while ((skb = fq_codel_dequeue(sch)) != NULL)
-		kfree_skb(skb);
+	INIT_LIST_HEAD(&q->new_flows);
+	INIT_LIST_HEAD(&q->old_flows);
+	for (i = 0; i < q->flows_cnt; i++) {
+		struct fq_codel_flow *flow = q->flows + i;
+
+		while (flow->head) {
+			struct sk_buff *skb = dequeue_head(flow);
+
+			qdisc_qstats_backlog_dec(sch, skb);
+			kfree_skb(skb);
+		}
+		INIT_LIST_HEAD(&flow->flowchain);
+		codel_vars_init(&flow->cvars);
+	}
+	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+	sch->q.qlen = 0;
 }
 static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {