Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Segregate namespaces properly in conntrack dumps, from Liping Zhang.

 2) tcp listener refcount fix in netfilter tproxy, from Eric Dumazet.

 3) Fix timeouts in qed driver due to xmit_more, from Yuval Mintz.

 4) Fix use-after-free in tcp_xmit_retransmit_queue().

 5) Userspace header fixups (use of __u32, missing includes, etc.) from
    Mikko Rapeli.

 6) Further refinements to fragmentation wrt gso and tunnels, from
    Shmulik Ladkani.

 7) Trigger poll correctly for zero length UDP packets, from Eric
    Dumazet.

 8) TCP window scaling fix, also from Eric Dumazet.

 9) SLAB_DESTROY_BY_RCU is not relevant any more for UDP sockets.

10) Module refcount leak in qdisc_create_dflt(), from Eric Dumazet.

11) Fix deadlock in cp_rx_poll() of 8139cp driver, from Gao Feng.

12) Memory leak in rhashtable's alloc_bucket_locks(), from Eric Dumazet.

13) Add new device ID to alx driver, from Owen Lin.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (83 commits)
  Add Killer E2500 device ID in alx driver.
  net: smc91x: fix SMC accesses
  Documentation: networking: dsa: Remove platform device TODO
  net/mlx5: Increase number of ethtool steering priorities
  net/mlx5: Add error prints when validate ETS failed
  net/mlx5e: Fix memory leak if refreshing TIRs fails
  net/mlx5e: Add ethtool counter for TX xmit_more
  net/mlx5e: Fix ethtool -g/G rx ring parameter report with striding RQ
  net/mlx5e: Don't wait for SQ completions on close
  net/mlx5e: Don't post fragmented MPWQE when RQ is disabled
  net/mlx5e: Don't wait for RQ completions on close
  net/mlx5e: Limit UMR length to the device's limitation
  rhashtable: fix a memory leak in alloc_bucket_locks()
  sfc: fix potential stack corruption from running past stat bitmask
  team: loadbalance: push lacpdus to exact delivery
  net: hns: dereference ppe_cb->ppe_common_cb if it is non-null
  8139cp: Fix one possible deadloop in cp_rx_poll
  i40e: Change some init flow for the client
  Revert "phy: IRQ cannot be shared"
  net: dsa: bcm_sf2: Fix race condition while unmasking interrupts
  ...
Merged by Linus Torvalds on 2016-08-29 12:29:13 -07:00 in commit 1f6a563ee0; 100 changed files with 820 additions and 460 deletions.

@@ -587,26 +587,6 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device.
 TODO
 ====
 
-The platform device problem
----------------------------
-DSA is currently implemented as a platform device driver which is far from ideal
-as was discussed in this thread:
-
-http://permalink.gmane.org/gmane.linux.network/329848
-
-This basically prevents the device driver model to be properly used and applied,
-and support non-MDIO, non-MMIO Ethernet connected switches.
-
-Another problem with the platform device driver approach is that it prevents the
-use of a modular switch drivers build due to a circular dependency, illustrated
-here:
-
-http://comments.gmane.org/gmane.linux.network/345803
-
-Attempts of reworking this has been done here:
-
-https://lwn.net/Articles/643149/
-
 Making SWITCHDEV and DSA converge towards an unified codebase
 -------------------------------------------------------------

@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-	.flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_USE_DMA | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {

@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata xcep_smc91x_info = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT | SMC91X_USE_DMA,
 };
 
 static struct platform_device smc91x_device = {

@@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 };
 
 static struct platform_device realview_eth_device = {

@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {

@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 	.leda = RPC_LED_100_10,
 	.ledb = RPC_LED_TX_RX,
 };

@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 	.leda = RPC_LED_100_10,
 	.ledb = RPC_LED_TX_RX,
 };

@@ -205,8 +205,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,	\
 static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
 						u32 mask)		\
 {									\
-	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
 	priv->irq##which##_mask &= ~(mask);				\
+	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
 }									\
 static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
 						u32 mask)		\
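
The ordering is the entire fix: the moment INTRL2_CPU_MASK_CLEAR is written, the newly unmasked interrupt can fire and its handler may consult the shadow mask, so the software copy has to be updated first. A minimal standalone sketch of the pattern (hypothetical names standing in for the driver's intrl2_*_writel() accessor and priv->irq*_mask state):

#include <stdint.h>

static volatile uint32_t mask_clear_reg;   /* hardware: write 1s to unmask */
static uint32_t shadow_mask = 0xffffffffu; /* software copy of masked bits */

static void unmask_irq_racy(uint32_t bits)
{
	mask_clear_reg = bits;  /* interrupt may fire immediately ...    */
	shadow_mask &= ~bits;   /* ... while shadow_mask is still stale  */
}

static void unmask_irq_fixed(uint32_t bits)
{
	shadow_mask &= ~bits;   /* publish the software state first      */
	mask_clear_reg = bits;  /* only then let the hardware deliver it */
}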

@@ -2656,15 +2656,19 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 			return ret;
 	}
 
-	/* Rate Control: disable ingress rate limiting. */
 	if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
 	    mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
-	    mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
 	    mv88e6xxx_6320_family(chip)) {
+		/* Rate Control: disable ingress rate limiting. */
 		ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
 					   PORT_RATE_CONTROL, 0x0001);
 		if (ret)
 			return ret;
+	} else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
+		ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+					   PORT_RATE_CONTROL, 0x0000);
+		if (ret)
+			return ret;
 	}
 
 	/* Port Control 1: disable trunking, disable sending

@@ -1545,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
+	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },

@@ -38,6 +38,7 @@
 #define ALX_DEV_ID_AR8161	0x1091
 #define ALX_DEV_ID_E2200	0xe091
 #define ALX_DEV_ID_E2400	0xe0a1
+#define ALX_DEV_ID_E2500	0xe0b1
 #define ALX_DEV_ID_AR8162	0x1090
 #define ALX_DEV_ID_AR8171	0x10A1
 #define ALX_DEV_ID_AR8172	0x10A0

@@ -159,7 +159,7 @@ static int bgmac_probe(struct bcma_device *core)
 
 	if (!bgmac_is_bcm4707_family(core)) {
 		mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
-		if (!IS_ERR(mii_bus)) {
+		if (IS_ERR(mii_bus)) {
 			err = PTR_ERR(mii_bus);
 			goto err;
 		}

@@ -170,7 +170,6 @@
 #define   NIC_QSET_SQ_0_7_DOOR			(0x010838)
 #define   NIC_QSET_SQ_0_7_STATUS		(0x010840)
 #define   NIC_QSET_SQ_0_7_DEBUG			(0x010848)
-#define   NIC_QSET_SQ_0_7_CNM_CHG		(0x010860)
 #define   NIC_QSET_SQ_0_7_STAT_0_1		(0x010900)
 
 #define   NIC_QSET_RBDR_0_1_CFG			(0x010C00)

@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
-		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+		/* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
+		 * produces bus errors when read
+		 */
+		p[i++] = 0;
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
 		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
 		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);

@@ -4335,6 +4335,11 @@ static void cfg_queues(struct adapter *adap)
 #endif
 	int ciq_size;
 
+	/* Reduce memory usage in kdump environment, disable all offload.
+	 */
+	if (is_kdump_kernel())
+		adap->params.offload = 0;
+
 	for_each_port(adap, i)
 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 
 #ifdef CONFIG_CHELSIO_T4_DCB
@@ -4365,11 +4370,6 @@ static void cfg_queues(struct adapter *adap)
 	if (q10g > netif_get_num_default_rss_queues())
 		q10g = netif_get_num_default_rss_queues();
 
-	/* Reduce memory usage in kdump environment, disable all offload.
-	 */
-	if (is_kdump_kernel())
-		adap->params.offload = 0;
-
 	for_each_port(adap, i) {
 		struct port_info *pi = adap2pinfo(adap, i);

@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 {
 	unsigned int size = lstatus & BD_LENGTH_MASK;
 	struct page *page = rxb->page;
+	bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
 
 	/* Remove the FCS from the packet length */
-	if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+	if (last)
 		size -= ETH_FCS_LEN;
 
-	if (likely(first))
+	if (likely(first)) {
 		skb_put(skb, size);
-	else
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-				rxb->page_offset + RXBUF_ALIGNMENT,
-				size, GFAR_RXB_TRUESIZE);
+	} else {
+		/* the last fragments' length contains the full frame length */
+		if (last)
+			size -= skb->len;
+
+		/* in case the last fragment consisted only of the FCS */
+		if (size > 0)
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+					rxb->page_offset + RXBUF_ALIGNMENT,
+					size, GFAR_RXB_TRUESIZE);
+	}
 
 	/* try reuse page */
 	if (unlikely(page_count(page) != 1))
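
A worked example of the new else-branch (illustrative frame sizes, using the 1600-byte receive buffers introduced in the next hunk): for a 3204-byte wire frame including the 4-byte FCS, the first buffer already holds skb->len = 1600 bytes, and on the last BD the length field carries the whole frame, so the final fragment contributes size = 3204 - 4 - 1600 = 1600 bytes. When a frame ends so close to a buffer boundary that the last fragment holds nothing but FCS bytes, the size > 0 test keeps that empty fragment from being added.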

@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
 
-#define GFAR_RXB_SIZE 1536
+/* prevent fragmenation by HW in DSA environments */
+#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
 #define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
 			   + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define GFAR_RXB_TRUESIZE 2048
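
For reference, roundup(1536 + 8, 64) evaluates to 1600: the 8 extra bytes leave room for a DSA tag on top of the 1536-byte default, and rounding up to the next 64-byte multiple keeps the buffer size aligned so the controller does not split tagged frames.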

@@ -328,9 +328,10 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
 static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
 {
 	u32 port;
-	struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
 
 	if (ppe_cb->ppe_common_cb) {
+		struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
 		port = ppe_cb->index;
 		dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
 	}

@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
 void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
 {
 	struct i40e_client_instance *cdev;
+	int ret = 0;
 
 	if (!vsi)
 		return;
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
 					"Cannot locate client instance open routine\n");
 				continue;
 			}
-			cdev->client->ops->open(&cdev->lan_info, cdev->client);
+			if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+				       &cdev->state))) {
+				ret = cdev->client->ops->open(&cdev->lan_info,
+							      cdev->client);
+				if (!ret)
+					set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+						&cdev->state);
+			}
 		}
 	}
 	mutex_unlock(&i40e_client_instance_mutex);
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
  * i40e_client_add_instance - add a client instance struct to the instance list
  * @pf: pointer to the board struct
  * @client: pointer to a client struct in the client list.
+ * @existing: if there was already an existing instance
  *
- * Returns cdev ptr on success, NULL on failure
+ * Returns cdev ptr on success or if already exists, NULL on failure
 **/
 static
 struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
-						     struct i40e_client *client)
+						     struct i40e_client *client,
+						     bool *existing)
 {
 	struct i40e_client_instance *cdev;
 	struct netdev_hw_addr *mac = NULL;
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
 	mutex_lock(&i40e_client_instance_mutex);
 	list_for_each_entry(cdev, &i40e_client_instances, list) {
 		if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
-			cdev = NULL;
+			*existing = true;
 			goto out;
 		}
 	}
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
 {
 	struct i40e_client_instance *cdev;
 	struct i40e_client *client;
+	bool existing = false;
 	int ret = 0;
 
 	if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
 			/* check if L2 VSI is up, if not we are not ready */
 			if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
 				continue;
+		} else {
+			dev_warn(&pf->pdev->dev, "This client %s is being instanciated at probe\n",
+				 client->name);
 		}
 
 		/* Add the client instance to the instance list */
-		cdev = i40e_client_add_instance(pf, client);
+		cdev = i40e_client_add_instance(pf, client, &existing);
 		if (!cdev)
 			continue;
 
-		/* Also up the ref_cnt of no. of instances of this client */
-		atomic_inc(&client->ref_cnt);
-		dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
-			 client->name, pf->hw.pf_id,
-			 pf->hw.bus.device, pf->hw.bus.func);
+		if (!existing) {
+			/* Also up the ref_cnt for no. of instances of this
+			 * client.
+			 */
+			atomic_inc(&client->ref_cnt);
+			dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+				 client->name, pf->hw.pf_id,
+				 pf->hw.bus.device, pf->hw.bus.func);
+		}
 
 		/* Send an Open request to the client */
 		atomic_inc(&cdev->ref_cnt);
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
 		 pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
 
 	/* Since in some cases register may have happened before a device gets
-	 * added, we can schedule a subtask to go initiate the clients.
+	 * added, we can schedule a subtask to go initiate the clients if
+	 * they can be launched at probe time.
 	 */
 	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
 	i40e_service_event_schedule(pf);

@@ -5431,7 +5431,6 @@ int i40e_open(struct net_device *netdev)
 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
 
 	udp_tunnel_get_rx_info(netdev);
-	i40e_notify_client_of_netdev_open(vsi);
 
 	return 0;
 }

@@ -2958,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 	}
 
 	/* was that the last pool using this rar? */
-	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+	if (mpsar_lo == 0 && mpsar_hi == 0 &&
+	    rar != 0 && rar != hw->mac.san_mac_rar_index)
 		hw->mac.ops.clear_rar(hw, rar);
+
 	return 0;
 }

@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
 	return cmd->cmd_buf + (idx << cmd->log_stride);
 }
 
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
 {
 	u8 *ptr = buf;
 	u8 sum = 0;
 	int i;
+	int end = len + offset;
 
-	for (i = 0; i < len; i++)
+	for (i = offset; i < end; i++)
 		sum ^= ptr[i];
 
 	return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
 
 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 {
-	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
 		return -EINVAL;
 
-	if (xor8_buf(block, sizeof(*block)) != 0xff)
+	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
 		return -EINVAL;
 
 	return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
-			   int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
 {
-	block->token = token;
-	if (csum) {
-		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
-					    sizeof(block->data) - 2);
-		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
-	}
+	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
 {
 	struct mlx5_cmd_mailbox *next = msg->next;
+	int size = msg->len;
+	int blen = size - min_t(int, sizeof(msg->first.data), size);
+	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+		/ MLX5_CMD_DATA_BLOCK_SIZE;
+	int i = 0;
 
-	while (next) {
-		calc_block_sig(next->buf, token, csum);
+	for (i = 0; i < n && next; i++) {
+		calc_block_sig(next->buf);
 		next = next->next;
 	}
 }
 
 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
-	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-	calc_chain_sig(ent->in, ent->token, csum);
-	calc_chain_sig(ent->out, ent->token, csum);
+	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+	if (csum) {
+		calc_chain_sig(ent->in);
+		calc_chain_sig(ent->out);
+	}
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
 	struct mlx5_cmd_mailbox *next = ent->out->next;
 	int err;
 	u8 sig;
+	int size = ent->out->len;
+	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+		/ MLX5_CMD_DATA_BLOCK_SIZE;
+	int i = 0;
 
-	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
 	if (sig != 0xff)
 		return -EINVAL;
 
-	while (next) {
+	for (i = 0; i < n && next; i++) {
 		err = verify_block_sig(next->buf);
 		if (err)
 			return err;
@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 	}
 
-	ent->token = alloc_token(cmd);
 	cmd->ent_arr[ent->idx] = ent;
 	lay = get_inst(cmd, ent->idx);
 	ent->lay = lay;
@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
 			   mlx5_cmd_cbk_t callback,
-			   void *context, int page_queue, u8 *status)
+			   void *context, int page_queue, u8 *status,
+			   u8 token)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);
 
+	ent->token = token;
+
 	if (!callback)
 		init_completion(&ent->done);
@@ -854,7 +870,8 @@ static const struct file_operations fops = {
 	.write	= dbg_write,
 };
 
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+			    u8 token)
 {
 	struct mlx5_cmd_prot_block *block;
 	struct mlx5_cmd_mailbox *next;
@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
 		memcpy(block->data, from, copy);
 		from += copy;
 		size -= copy;
+		block->token = token;
 		next = next->next;
 	}
 
@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
 }
 
 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
-					       gfp_t flags, int size)
+					       gfp_t flags, int size,
+					       u8 token)
 {
 	struct mlx5_cmd_mailbox *tmp, *head = NULL;
 	struct mlx5_cmd_prot_block *block;
@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
 		tmp->next = head;
 		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
 		block->block_num = cpu_to_be32(n - i - 1);
+		block->token = token;
 		head = tmp;
 	}
 	msg->next = head;
@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
 	}
 
 	if (IS_ERR(msg))
-		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
 
 	return msg;
 }
@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 	int err;
 	u8 status = 0;
 	u32 drv_synd;
+	u8 token;
 
 	if (pci_channel_offline(dev->pdev) ||
 	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		return err;
 	}
 
-	err = mlx5_copy_to_msg(inb, in, in_size);
+	token = alloc_token(&dev->cmd);
+
+	err = mlx5_copy_to_msg(inb, in, in_size, token);
 	if (err) {
 		mlx5_core_warn(dev, "err %d\n", err);
 		goto out_in;
 	}
 
-	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
 	if (IS_ERR(outb)) {
 		err = PTR_ERR(outb);
 		goto out_in;
 	}
 
 	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-			      pages_queue, &status);
+			      pages_queue, &status, token);
 	if (err)
 		goto out_out;
@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
 	INIT_LIST_HEAD(&cmd->cache.med.head);
 
 	for (i = 0; i < NUM_LONG_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
 		if (IS_ERR(msg)) {
 			err = PTR_ERR(msg);
 			goto ex_err;
@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
 	}
 
 	for (i = 0; i < NUM_MED_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
 		if (IS_ERR(msg)) {
 			err = PTR_ERR(msg);
 			goto ex_err;
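
The pivot of these hunks is the reworked xor8_buf(), which now XORs the bytes in buf[offset, offset + len) so a caller can checksum a sub-range of a block (for example, starting at rsvd0) without forming a pointer into the middle of the struct; the command token is likewise threaded through message allocation and copy so every mailbox block carries it before any signature is computed. A standalone rendering of the helper, as a sketch with userspace types swapped in for the kernel ones:

#include <stddef.h>
#include <stdint.h>

/* XOR-fold ptr[offset .. offset + len - 1]; mirrors the patched
 * xor8_buf() above.
 */
static uint8_t xor8_buf(void *buf, size_t offset, int len)
{
	uint8_t *ptr = buf;
	uint8_t sum = 0;
	int end = len + (int)offset;
	int i;

	for (i = (int)offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}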

@@ -73,8 +73,12 @@
 #define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 #define MLX5_MPWRQ_STRIDES_PER_PAGE	(MLX5_MPWRQ_NUM_STRIDES >> \
					 MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
-				   BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+	(rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
 #define MLX5_UMR_ALIGN				(2048)
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
@@ -219,9 +223,8 @@ struct mlx5e_tstamp {
 };
 
 enum {
-	MLX5E_RQ_STATE_POST_WQES_ENABLE,
+	MLX5E_RQ_STATE_FLUSH,
 	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
-	MLX5E_RQ_STATE_FLUSH_TIMEOUT,
 	MLX5E_RQ_STATE_AM,
 };
@@ -304,6 +307,7 @@ struct mlx5e_rq {
 
 	unsigned long		state;
 	int			ix;
+	u32			mpwqe_mtt_offset;
 
 	struct mlx5e_rx_am	am; /* Adaptive Moderation */
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma {
 };
 
 enum {
-	MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+	MLX5E_SQ_STATE_FLUSH,
 	MLX5E_SQ_STATE_BF_ENABLE,
-	MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -698,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -814,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
			 MLX5E_MAX_NUM_CHANNELS);
 }
 
-static inline int mlx5e_get_mtt_octw(int npages)
-{
-	return ALIGN(npages, 8) / 2;
-}
-
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;

@@ -139,7 +139,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
 	struct mlx5e_tir *tir;
 	void *in;
 	int inlen;
-	int err;
+	int err = 0;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 	in = mlx5_vzalloc(inlen);
@@ -151,10 +151,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
 	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
 		err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
 		if (err)
-			return err;
+			goto out;
 	}
 
+out:
 	kvfree(in);
 
-	return 0;
+	return err;
 }

@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 	return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
 }
 
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+				    struct ieee_ets *ets)
 {
 	int bw_sum = 0;
 	int i;
 
 	/* Validate Priority */
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+			netdev_err(netdev,
+				   "Failed to validate ETS: priority value greater than max(%d)\n",
+				   MLX5E_MAX_PRIORITY);
 			return -EINVAL;
+		}
 	}
 
 	/* Validate Bandwidth Sum */
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-			if (!ets->tc_tx_bw[i])
+			if (!ets->tc_tx_bw[i]) {
+				netdev_err(netdev,
+					   "Failed to validate ETS: BW 0 is illegal\n");
 				return -EINVAL;
+			}
 
 			bw_sum += ets->tc_tx_bw[i];
 		}
 	}
 
-	if (bw_sum != 0 && bw_sum != 100)
+	if (bw_sum != 0 && bw_sum != 100) {
+		netdev_err(netdev,
+			   "Failed to validate ETS: BW sum is illegal\n");
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;
 
-	err = mlx5e_dbcnl_validate_ets(ets);
+	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)
 		return err;

@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
								   sq_stats_desc, j);
 }
 
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+				    int num_wqe)
+{
+	int packets_per_wqe;
+	int stride_size;
+	int num_strides;
+	int wqe_size;
+
+	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		return num_wqe;
+
+	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+	num_strides = 1 << priv->params.mpwqe_log_num_strides;
+	wqe_size = stride_size * num_strides;
+	packets_per_wqe = wqe_size /
+			  ALIGN(ETH_DATA_LEN, stride_size);
+
+	return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+				    int num_packets)
+{
+	int packets_per_wqe;
+	int stride_size;
+	int num_strides;
+	int wqe_size;
+	int num_wqes;
+
+	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		return num_packets;
+
+	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+	num_strides = 1 << priv->params.mpwqe_log_num_strides;
+	wqe_size = stride_size * num_strides;
+
+	num_packets = (1 << order_base_2(num_packets));
+
+	packets_per_wqe = wqe_size /
+			  ALIGN(ETH_DATA_LEN, stride_size);
+	num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+	return 1 << (order_base_2(num_wqes));
+}
+
 static void mlx5e_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *param)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int rq_wq_type = priv->params.rq_wq_type;
 
-	param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
+	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+							 1 << mlx5_max_log_rq_size(rq_wq_type));
 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
-	param->rx_pending = 1 << priv->params.log_rq_size;
+	param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+						     1 << priv->params.log_rq_size);
 	param->tx_pending = 1 << priv->params.log_sq_size;
 }
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	bool was_opened;
 	int rq_wq_type = priv->params.rq_wq_type;
+	u32 rx_pending_wqes;
+	u32 min_rq_size;
+	u32 max_rq_size;
 	u16 min_rx_wqes;
 	u8 log_rq_size;
 	u8 log_sq_size;
+	u32 num_mtts;
 	int err = 0;
 
 	if (param->rx_jumbo_pending) {
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
			    __func__);
 		return -EINVAL;
 	}
-	if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
+
+	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+					       1 << mlx5_min_log_rq_size(rq_wq_type));
+	max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+					       1 << mlx5_max_log_rq_size(rq_wq_type));
+	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+						   param->rx_pending);
+
+	if (param->rx_pending < min_rq_size) {
 		netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
			    __func__, param->rx_pending,
-			    1 << mlx5_min_log_rq_size(rq_wq_type));
+			    min_rq_size);
 		return -EINVAL;
 	}
-	if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
+	if (param->rx_pending > max_rq_size) {
 		netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
			    __func__, param->rx_pending,
-			    1 << mlx5_max_log_rq_size(rq_wq_type));
+			    max_rq_size);
 		return -EINVAL;
 	}
+
+	num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+				       rx_pending_wqes);
+	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+		netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+			    __func__, param->rx_pending);
+		return -EINVAL;
+	}
+
 	if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
 		netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
			    __func__, param->tx_pending,
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 		return -EINVAL;
 	}
 
-	log_rq_size = order_base_2(param->rx_pending);
+	log_rq_size = order_base_2(rx_pending_wqes);
 	log_sq_size = order_base_2(param->tx_pending);
-	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
+	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
 
 	if (log_rq_size == priv->params.log_rq_size &&
	    log_sq_size == priv->params.log_sq_size &&
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
 	unsigned int count = ch->combined_count;
 	bool arfs_enabled;
 	bool was_opened;
+	u32 num_mtts;
 	int err = 0;
 
 	if (!count) {
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
 		return -EINVAL;
 	}
 
+	num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+		netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+			    __func__, count);
+		return -EINVAL;
+	}
+
 	if (priv->params.num_channels == count)
 		return 0;
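
To make the WQE-to-packet conversion concrete (illustrative parameters, not necessarily the driver's defaults): with 2048-byte strides (mpwqe_log_stride_sz = 11) and 64 strides per WQE (mpwqe_log_num_strides = 6), wqe_size = 131072 and packets_per_wqe = 131072 / ALIGN(1500, 2048) = 64; a ring of 1024 multi-packet WQEs is then reported to ethtool as 1 << (order_base_2(1024 * 64) - 1) = 32768 packets, and mlx5e_packets_to_rx_wqes() maps a requested packet count back to the nearest power-of-two WQE count.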

@@ -39,13 +39,6 @@
 #include "eswitch.h"
 #include "vxlan.h"
 
-enum {
-	MLX5_EN_QP_FLUSH_TIMEOUT_MS	= 5000,
-	MLX5_EN_QP_FLUSH_MSLEEP_QUANT	= 20,
-	MLX5_EN_QP_FLUSH_MAX_ITER	= MLX5_EN_QP_FLUSH_TIMEOUT_MS /
-					  MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
 struct mlx5e_rq_param {
 	u32			rqc[MLX5_ST_SZ_DW(rqc)];
 	struct mlx5_wq_param	wq;
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->tx_queue_stopped	+= sq_stats->stopped;
 		s->tx_queue_wake	+= sq_stats->wake;
 		s->tx_queue_dropped	+= sq_stats->dropped;
+		s->tx_xmit_more		+= sq_stats->xmit_more;
 		s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
 		tx_offload_none		+= sq_stats->csum_none;
 	}
@@ -340,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
+		rq->mpwqe_mtt_offset = c->ix *
+			MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
 		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
 		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
 		rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -428,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
 	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
-	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
 	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
 	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
@@ -525,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
 	return -ETIMEDOUT;
 }
 
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5e_rx_wqe *wqe;
+	__be16 wqe_ix_be;
+	u16 wqe_ix;
+
+	/* UMR WQE (if in progress) is always at wq->head */
+	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+		mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
+	while (!mlx5_wq_ll_is_empty(wq)) {
+		wqe_ix_be = *wq->tail_next;
+		wqe_ix    = be16_to_cpu(wqe_ix_be);
+		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+		rq->dealloc_wqe(rq, wqe_ix);
+		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+			       &wqe->next.next_wqe_index);
+	}
+}
+
 static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
@@ -548,8 +565,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (param->am_enabled)
 		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-
 	sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
 	sq->ico_wqe_info[pi].num_wqebbs = 1;
 	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -566,23 +581,8 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-	int tout = 0;
-	int err;
-
-	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+	set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
 	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 
-	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-	while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
-	       tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
-		msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
-	if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
-		set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
-	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
-	napi_synchronize(&rq->channel->napi);
-
 	cancel_work_sync(&rq->am.work);
 
 	mlx5e_disable_rq(rq);
@@ -821,7 +821,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
 		goto err_disable_sq;
 
 	if (sq->txq) {
-		set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
 		netdev_tx_reset_queue(sq->txq);
 		netif_tx_start_queue(sq->txq);
 	}
@@ -845,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-	int tout = 0;
-	int err;
-
-	if (sq->txq) {
-		clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-		/* prevent netif_tx_wake_queue */
-		napi_synchronize(&sq->channel->napi);
-		netif_tx_disable_queue(sq->txq);
-
-		/* ensure hw is notified of all pending wqes */
-		if (mlx5e_sq_has_room_for(sq, 1))
-			mlx5e_send_nop(sq, true);
-
-		err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
-				      MLX5_SQC_STATE_ERR, false, 0);
-		if (err)
-			set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
-	}
-
-	/* wait till sq is empty, unless a TX timeout occurred on this SQ */
-	while (sq->cc != sq->pc &&
-	       !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
-		msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-		if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
-			set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
-	}
-
-	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+	set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+	/* prevent netif_tx_wake_queue */
 	napi_synchronize(&sq->channel->napi);
 
-	mlx5e_free_tx_descs(sq);
+	if (sq->txq) {
+		netif_tx_disable_queue(sq->txq);
+
+		/* last doorbell out, godspeed .. */
+		if (mlx5e_sq_has_room_for(sq, 1))
+			mlx5e_send_nop(sq, true);
+	}
+
 	mlx5e_disable_sq(sq);
+	mlx5e_free_tx_descs(sq);
 	mlx5e_destroy_sq(sq);
 }
@@ -1826,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev)
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
 
-	err = mlx5e_set_dev_port_mtu(netdev);
-	if (err)
-		goto err_clear_state_opened_flag;
-
 	err = mlx5e_open_channels(priv);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -2573,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 	u16 max_mtu;
 	u16 min_mtu;
 	int err = 0;
+	bool reset;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -2588,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
 	mutex_lock(&priv->state_lock);
 
+	reset = !priv->params.lro_en &&
+		(priv->params.rq_wq_type !=
+		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
 	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
+	if (was_opened && reset)
 		mlx5e_close_locked(netdev);
 
 	netdev->mtu = new_mtu;
+	mlx5e_set_dev_port_mtu(netdev);
 
-	if (was_opened)
+	if (was_opened && reset)
 		err = mlx5e_open_locked(netdev);
 
 	mutex_unlock(&priv->state_lock);
@@ -2794,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
 		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		sched_work = true;
-		set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+		set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
 		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
 	}
@@ -3231,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_mkey_seg *mkc;
 	int inlen = sizeof(*in);
-	u64 npages =
-		priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+	u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+					 BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
 	int err;
 
 	in = mlx5_vzalloc(inlen);
@@ -3246,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
		     MLX5_PERM_LOCAL_WRITE |
		     MLX5_ACCESS_MODE_MTT;
 
+	npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
 	mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
 	mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
-	mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+	mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
 	mkc->log2_page_size = PAGE_SHIFT;
 
 	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
@@ -3385,6 +3370,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
 		rep.load = mlx5e_nic_rep_load;
 		rep.unload = mlx5e_nic_rep_unload;
 		rep.vport = 0;
@@ -3463,6 +3449,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 
 	mlx5e_init_l2_addr(priv);
 
+	mlx5e_set_dev_port_mtu(netdev);
+
 	err = register_netdev(netdev);
 	if (err) {
 		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -3501,16 +3489,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
+	u8 mac[ETH_ALEN];
 
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return;
 
+	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
 	for (vport = 1; vport < total_vfs; vport++) {
 		struct mlx5_eswitch_rep rep;
 
 		rep.load = mlx5e_vport_rep_load;
 		rep.unload = mlx5e_vport_rep_unload;
 		rep.vport = vport;
+		ether_addr_copy(rep.hw_id, mac);
 		mlx5_eswitch_register_vport_rep(esw, &rep);
 	}
 }

@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	u8 mac[ETH_ALEN];
 
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-		mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
 		attr->u.ppid.id_len = ETH_ALEN;
-		memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
+		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
 		break;
 	default:
 		return -EOPNOTSUPP;

@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
 	}
 }
 
-static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 {
-	return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+	return rq->mpwqe_mtt_offset +
 		wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 }
 
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 	struct mlx5_wqe_data_seg *dseg = &wqe->data;
 	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
 	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
-	u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
 
 	memset(wqe, 0, sizeof(*wqe));
 	cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 
 	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
 	ucseg->klm_octowords =
-		cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
 	ucseg->bsf_octowords =
-		cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
 	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 
 	dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
 {
 	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
 	int mtt_sz = mlx5e_get_wqe_mtt_sz();
-	u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+	u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
 	int i;
 
 	wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
 	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
 	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+		mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+		return;
+	}
+
 	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
 	rq->stats.mpwqe_frag++;
 
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	wi->free_wqe(rq, wi);
 }
 
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
-	struct mlx5_wq_ll *wq = &rq->wq;
-	struct mlx5e_rx_wqe *wqe;
-	__be16 wqe_ix_be;
-	u16 wqe_ix;
-
-	while (!mlx5_wq_ll_is_empty(wq)) {
-		wqe_ix_be = *wq->tail_next;
-		wqe_ix    = be16_to_cpu(wqe_ix_be);
-		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
-		rq->dealloc_wqe(rq, wqe_ix);
-		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
-			       &wqe->next.next_wqe_index);
-	}
-}
-
 #define RQ_CANNOT_POST(rq) \
-	(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
+	(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
 
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
@@ -916,7 +905,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
 	int work_done = 0;
 
-	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
 		return 0;
 
 	if (cq->decmprs_left)

@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
 	u64 tx_queue_stopped;
 	u64 tx_queue_wake;
 	u64 tx_queue_dropped;
+	u64 tx_xmit_more;
 	u64 rx_wqe_err;
 	u64 rx_mpwqe_filler;
 	u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats {
 	/* commonly accessed in data path */
 	u64 packets;
 	u64 bytes;
+	u64 xmit_more;
 	u64 tso_packets;
 	u64 tso_bytes;
 	u64 tso_inner_packets;
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
 };
 
 #define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

View File

@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_dissector_key_control *key = struct flow_dissector_key_control *key =
skb_flow_dissector_target(f->dissector, skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC, FLOW_DISSECTOR_KEY_CONTROL,
f->key); f->key);
addr_type = key->addr_type; addr_type = key->addr_type;
} }

View File

@ -375,6 +375,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.packets++; sq->stats.packets++;
sq->stats.bytes += num_bytes; sq->stats.bytes += num_bytes;
sq->stats.xmit_more += skb->xmit_more;
return NETDEV_TX_OK; return NETDEV_TX_OK;
dma_unmap_wqe_err: dma_unmap_wqe_err:
@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb); return mlx5e_sq_xmit(sq, skb);
} }
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
u16 ci;
int i;
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->skb[ci];
wi = &sq->wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
continue;
}
for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, sq->dma_fifo_cc++);
mlx5e_tx_dma_unmap(sq->pdev, dma);
}
dev_kfree_skb_any(skb);
sq->cc += wi->num_wqebbs;
}
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{ {
struct mlx5e_sq *sq; struct mlx5e_sq *sq;
@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq); sq = container_of(cq, struct mlx5e_sq, cq);
if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state))) if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
return false; return false;
npkts = 0; npkts = 0;
@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes); netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) && if (netif_tx_queue_stopped(sq->txq) &&
mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) && mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) { netif_tx_wake_queue(sq->txq);
netif_tx_wake_queue(sq->txq); sq->stats.wake++;
sq->stats.wake++;
} }
return (i == MLX5E_TX_CQ_POLL_BUDGET); return (i == MLX5E_TX_CQ_POLL_BUDGET);
} }
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
u16 ci;
int i;
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->skb[ci];
wi = &sq->wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
continue;
}
for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, sq->dma_fifo_cc++);
mlx5e_tx_dma_unmap(sq->pdev, dma);
}
dev_kfree_skb_any(skb);
sq->cc += wi->num_wqebbs;
}
}

View File

@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{ {
struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
struct mlx5_wq_cyc *wq; struct mlx5_wq_cyc *wq;
struct mlx5_cqe64 *cqe; struct mlx5_cqe64 *cqe;
struct mlx5e_sq *sq;
u16 sqcc; u16 sqcc;
if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
return;
cqe = mlx5e_get_cqe(cq); cqe = mlx5e_get_cqe(cq);
if (likely(!cqe)) if (likely(!cqe))
return; return;
sq = container_of(cq, struct mlx5e_sq, cq);
wq = &sq->wq; wq = &sq->wq;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(), /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),

View File

@ -1451,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ /* Only VFs need ACLs for VST and spoofchk filtering */
if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_ingress_config(esw, vport); esw_vport_ingress_config(esw, vport);
esw_vport_egress_config(esw, vport); esw_vport_egress_config(esw, vport);
} }
@ -1502,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/ */
esw_vport_change_handle_locked(vport); esw_vport_change_handle_locked(vport);
vport->enabled_events = 0; vport->enabled_events = 0;
if (vport_num) { if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport);
} }
@ -1767,7 +1768,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
vport, err); vport, err);
mutex_lock(&esw->state_lock); mutex_lock(&esw->state_lock);
if (evport->enabled) if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport); err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock); mutex_unlock(&esw->state_lock);
return err; return err;
@ -1839,7 +1840,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock); mutex_lock(&esw->state_lock);
evport->vlan = vlan; evport->vlan = vlan;
evport->qos = qos; evport->qos = qos;
if (evport->enabled) { if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport); err = esw_vport_ingress_config(esw, evport);
if (err) if (err)
goto out; goto out;
@ -1868,10 +1869,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock); mutex_lock(&esw->state_lock);
pschk = evport->spoofchk; pschk = evport->spoofchk;
evport->spoofchk = spoofchk; evport->spoofchk = spoofchk;
if (evport->enabled) if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport); err = esw_vport_ingress_config(esw, evport);
if (err) if (err)
evport->spoofchk = pschk; evport->spoofchk = pschk;
}
mutex_unlock(&esw->state_lock); mutex_unlock(&esw->state_lock);
return err; return err;

View File

@ -174,6 +174,7 @@ struct mlx5_eswitch_rep {
void *priv_data; void *priv_data;
struct list_head vport_sqs_list; struct list_head vport_sqs_list;
bool valid; bool valid;
u8 hw_id[ETH_ALEN];
}; };
struct mlx5_esw_offload { struct mlx5_esw_offload {

View File

@ -113,7 +113,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport; dest.vport_num = vport;
flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec, flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest);
if (IS_ERR(flow_rule)) if (IS_ERR(flow_rule))
@ -535,7 +535,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
esw_destroy_offloads_fdb_table(esw); esw_destroy_offloads_fdb_table(esw);
} }
static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{ {
switch (mode) { switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY: case DEVLINK_ESWITCH_MODE_LEGACY:
@ -551,6 +551,22 @@ static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
return 0; return 0;
} }
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
switch (mlx5_mode) {
case SRIOV_LEGACY:
*mode = DEVLINK_ESWITCH_MODE_LEGACY;
break;
case SRIOV_OFFLOADS:
*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
break;
default:
return -EINVAL;
}
return 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{ {
struct mlx5_core_dev *dev; struct mlx5_core_dev *dev;
@ -566,7 +582,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
if (cur_mlx5_mode == SRIOV_NONE) if (cur_mlx5_mode == SRIOV_NONE)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode)) if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL; return -EINVAL;
if (cur_mlx5_mode == mlx5_mode) if (cur_mlx5_mode == mlx5_mode)
@ -592,9 +608,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (dev->priv.eswitch->mode == SRIOV_NONE) if (dev->priv.eswitch->mode == SRIOV_NONE)
return -EOPNOTSUPP; return -EOPNOTSUPP;
*mode = dev->priv.eswitch->mode; return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
return 0;
} }
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,

View File

@ -80,7 +80,7 @@
LEFTOVERS_NUM_PRIOS) LEFTOVERS_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 10 #define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, aRFS */ /* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4 #define KERNEL_NIC_PRIO_NUM_LEVELS 4

View File

@ -126,12 +126,21 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
for (node = &first->node; node; node = rb_next(node)) { for (node = &first->node; node; node = rb_next(node)) {
struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
struct mlx5_fc_cache *c = &counter->cache; struct mlx5_fc_cache *c = &counter->cache;
u64 packets;
u64 bytes;
if (counter->id > last_id) if (counter->id > last_id)
break; break;
mlx5_cmd_fc_bulk_get(dev, b, mlx5_cmd_fc_bulk_get(dev, b,
counter->id, &c->packets, &c->bytes); counter->id, &packets, &bytes);
if (c->packets == packets)
continue;
c->packets = packets;
c->bytes = bytes;
c->lastuse = jiffies;
} }
out: out:

View File

@ -1420,36 +1420,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__); dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev); mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv); mlx5_unload_one(dev, priv);
pci_save_state(pdev);
mlx5_pci_disable_device(dev); mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ? return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
} }
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err = 0;
dev_info(&pdev->dev, "%s was called\n", __func__);
err = mlx5_pci_enable_device(dev);
if (err) {
dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
, __func__, err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_pci_err_detected(dev->pdev, 0);
}
/* wait for the device to show vital signs by waiting /* wait for the device to show vital signs by waiting
* for the health counter to start counting. * for the health counter to start counting.
*/ */
@ -1477,6 +1453,36 @@ static int wait_vital(struct pci_dev *pdev)
return -ETIMEDOUT; return -ETIMEDOUT;
} }
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
dev_info(&pdev->dev, "%s was called\n", __func__);
err = mlx5_pci_enable_device(dev);
if (err) {
dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
, __func__, err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_RECOVERED;
}
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_pci_err_detected(dev->pdev, 0);
}
static void mlx5_pci_resume(struct pci_dev *pdev) static void mlx5_pci_resume(struct pci_dev *pdev)
{ {
struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@ -1485,13 +1491,6 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__); dev_info(&pdev->dev, "%s was called\n", __func__);
pci_save_state(pdev);
err = wait_vital(pdev);
if (err) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
return;
}
err = mlx5_load_one(dev, priv); err = mlx5_load_one(dev, priv);
if (err) if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n" dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"

View File

@ -56,6 +56,7 @@
#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) #define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
#define MLXSW_PORT_CPU_PORT 0x0 #define MLXSW_PORT_CPU_PORT 0x0
#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2)
#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) #define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)

View File

@ -3324,6 +3324,39 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_find(mlxsw_sp, fid); return mlxsw_sp_fid_find(mlxsw_sp, fid);
} }
static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
{
return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}
static u16 mlxsw_sp_flood_table_index_get(u16 fid)
{
return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
}
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
bool set)
{
enum mlxsw_flood_table_type table_type;
char *sftr_pl;
u16 index;
int err;
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
table_type = mlxsw_sp_flood_table_type_get(fid);
index = mlxsw_sp_flood_table_index_get(fid);
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
1, MLXSW_PORT_ROUTER_PORT, set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
kfree(sftr_pl);
return err;
}
static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{ {
if (mlxsw_sp_fid_is_vfid(fid)) if (mlxsw_sp_fid_is_vfid(fid))
@ -3360,10 +3393,14 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
if (rif == MLXSW_SP_RIF_MAX) if (rif == MLXSW_SP_RIF_MAX)
return -ERANGE; return -ERANGE;
err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
if (err) if (err)
return err; return err;
err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
if (err)
goto err_rif_bridge_op;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
if (err) if (err)
goto err_rif_fdb_op; goto err_rif_fdb_op;
@ -3385,6 +3422,8 @@ err_rif_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op: err_rif_fdb_op:
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
return err; return err;
} }
@ -3404,6 +3443,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
} }

View File

@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
u8 local_port = mlxsw_sp_port->local_port; u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index; u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = pool_type; enum mlxsw_reg_sbxx_dir dir = pool_type;
u8 pool = pool_index; u8 pool = pool_get(pool_index);
u32 max_buff; u32 max_buff;
int err; int err;
if (dir != dir_get(pool_index))
return -EINVAL;
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
threshold, &max_buff); threshold, &max_buff);
if (err) if (err)
return err; return err;
if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
if (pool < MLXSW_SP_SB_POOL_COUNT)
return -EINVAL;
pool -= MLXSW_SP_SB_POOL_COUNT;
} else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
return -EINVAL;
}
return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
0, max_buff, pool); 0, max_buff, pool);
} }

View File

@ -657,7 +657,7 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
return 0; return 0;
} }
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
if (WARN_ON(!r)) if (WARN_ON(!r))
return -EINVAL; return -EINVAL;

View File

@ -475,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
mac[5] = tmp >> 8; mac[5] = tmp >> 8;
} }
static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
{
if (enable)
clk_prepare_enable(pldat->clk);
else
clk_disable_unprepare(pldat->clk);
}
static void __lpc_params_setup(struct netdata_local *pldat) static void __lpc_params_setup(struct netdata_local *pldat)
{ {
u32 tmp; u32 tmp;
@ -1056,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base)); writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags); spin_unlock_irqrestore(&pldat->lock, flags);
__lpc_eth_clock_enable(pldat, false); clk_disable_unprepare(pldat->clk);
return 0; return 0;
} }
@ -1197,11 +1189,14 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int lpc_eth_open(struct net_device *ndev) static int lpc_eth_open(struct net_device *ndev)
{ {
struct netdata_local *pldat = netdev_priv(ndev); struct netdata_local *pldat = netdev_priv(ndev);
int ret;
if (netif_msg_ifup(pldat)) if (netif_msg_ifup(pldat))
dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
__lpc_eth_clock_enable(pldat, true); ret = clk_prepare_enable(pldat->clk);
if (ret)
return ret;
/* Suspended PHY makes LPC ethernet core block, so resume now */ /* Suspended PHY makes LPC ethernet core block, so resume now */
phy_resume(ndev->phydev); phy_resume(ndev->phydev);
@ -1320,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
} }
/* Enable network clock */ /* Enable network clock */
__lpc_eth_clock_enable(pldat, true); ret = clk_prepare_enable(pldat->clk);
if (ret)
goto err_out_clk_put;
/* Map IO space */ /* Map IO space */
pldat->net_base = ioremap(res->start, resource_size(res)); pldat->net_base = ioremap(res->start, resource_size(res));
@ -1454,6 +1451,7 @@ err_out_iounmap:
iounmap(pldat->net_base); iounmap(pldat->net_base);
err_out_disable_clocks: err_out_disable_clocks:
clk_disable_unprepare(pldat->clk); clk_disable_unprepare(pldat->clk);
err_out_clk_put:
clk_put(pldat->clk); clk_put(pldat->clk);
err_out_free_dev: err_out_free_dev:
free_netdev(ndev); free_netdev(ndev);

View File

@ -561,9 +561,18 @@ struct qed_dev {
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid) u32 concrete_fid)
{ {
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
u8 vf_valid = GET_FIELD(concrete_fid,
PXP_CONCRETE_FID_VFVALID);
u8 sw_fid;
return pfid; if (vf_valid)
sw_fid = vfid + MAX_NUM_PFS;
else
sw_fid = pfid;
return sw_fid;
} }
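
The rewritten helper above maps a VF's concrete FID into the software FID range after all PFs, so PF and VF software FIDs can no longer collide. A standalone sketch of that mapping; the bit layout and MAX_NUM_PFS value here are assumed purely for illustration (the real masks live in the qed PXP headers):

#include <stdio.h>

/* Assumed field layout, for illustration only. */
#define PFID_MASK    0xfu         /* assumed: PFID in bits 3..0  */
#define VFID_SHIFT   8
#define VFID_MASK    0xffu        /* assumed: VFID in bits 15..8 */
#define VFVALID_BIT  (1u << 16)   /* assumed: VF-valid flag      */
#define MAX_NUM_PFS  16           /* assumed PF count            */

static unsigned int concrete_to_sw_fid(unsigned int concrete)
{
	unsigned int pfid = concrete & PFID_MASK;
	unsigned int vfid = (concrete >> VFID_SHIFT) & VFID_MASK;

	/* VFs land after all PFs, so the two ranges cannot collide. */
	return (concrete & VFVALID_BIT) ? vfid + MAX_NUM_PFS : pfid;
}

int main(void)
{
	printf("PF 3 -> %u\n", concrete_to_sw_fid(3));
	printf("VF 3 -> %u\n",
	       concrete_to_sw_fid(VFVALID_BIT | (3u << VFID_SHIFT)));
	return 0;
}
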
#define PURE_LB_TC 8 #define PURE_LB_TC 8

View File

@ -722,11 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq->tx_db.data.bd_prod = txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq)) if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
qede_update_tx_producer(txq); qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) { < (MAX_SKB_FRAGS + 1))) {
if (skb->xmit_more)
qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq); netif_tx_stop_queue(netdev_txq);
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n"); "Stop queue was called\n");

View File

@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
unsigned int rx_tail = cp->rx_tail; unsigned int rx_tail = cp->rx_tail;
int rx; int rx;
rx_status_loop:
rx = 0; rx = 0;
rx_status_loop:
cpw16(IntrStatus, cp_rx_intr_mask); cpw16(IntrStatus, cp_rx_intr_mask);
while (rx < budget) { while (rx < budget) {

View File

@ -1517,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
} }
#if BITS_PER_LONG == 64 #if BITS_PER_LONG == 64
BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
mask[0] = raw_mask[0]; mask[0] = raw_mask[0];
mask[1] = raw_mask[1]; mask[1] = raw_mask[1];
#else #else
BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
mask[0] = raw_mask[0] & 0xffffffff; mask[0] = raw_mask[0] & 0xffffffff;
mask[1] = raw_mask[0] >> 32; mask[1] = raw_mask[0] >> 32;
mask[2] = raw_mask[1] & 0xffffffff; mask[2] = raw_mask[1] & 0xffffffff;
mask[3] = raw_mask[1] >> 32;
#endif #endif
} }

View File

@ -2275,6 +2275,13 @@ static int smc_drv_probe(struct platform_device *pdev)
if (pd) { if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg)); memcpy(&lp->cfg, pd, sizeof(lp->cfg));
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
dev_err(&pdev->dev,
"at least one of 8-bit or 16-bit access support is required.\n");
ret = -ENXIO;
goto out_free_netdev;
}
} }
#if IS_BUILTIN(CONFIG_OF) #if IS_BUILTIN(CONFIG_OF)

View File

@ -36,6 +36,27 @@
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/smc91x.h> #include <linux/smc91x.h>
/*
* Any 16-bit access is performed with two 8-bit accesses if the hardware
* can't do it directly. Most registers are 16-bit so those are mandatory.
*/
#define SMC_outw_b(x, a, r) \
do { \
unsigned int __val16 = (x); \
unsigned int __reg = (r); \
SMC_outb(__val16, a, __reg); \
SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
} while (0)
#define SMC_inw_b(a, r) \
({ \
unsigned int __val16; \
unsigned int __reg = r; \
__val16 = SMC_inb(a, __reg); \
__val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
__val16; \
})
/* /*
* Define your architecture specific bus configuration parameters here. * Define your architecture specific bus configuration parameters here.
*/ */
@ -55,10 +76,30 @@
#define SMC_IO_SHIFT (lp->io_shift) #define SMC_IO_SHIFT (lp->io_shift)
#define SMC_inb(a, r) readb((a) + (r)) #define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r)) #define SMC_inw(a, r) \
({ \
unsigned int __smc_r = r; \
SMC_16BIT(lp) ? readw((a) + __smc_r) : \
SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
({ BUG(); 0; }); \
})
#define SMC_inl(a, r) readl((a) + (r)) #define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r)) #define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(v, a, r) \
do { \
unsigned int __v = v, __smc_r = r; \
if (SMC_16BIT(lp)) \
__SMC_outw(__v, a, __smc_r); \
else if (SMC_8BIT(lp)) \
SMC_outw_b(__v, a, __smc_r); \
else \
BUG(); \
} while (0)
#define SMC_outl(v, a, r) writel(v, (a) + (r)) #define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
@ -66,7 +107,7 @@
#define SMC_IRQ_FLAGS (-1) /* from resource */ #define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */ /* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{ {
if ((machine_is_mainstone() || machine_is_stargate2() || if ((machine_is_mainstone() || machine_is_stargate2() ||
machine_is_pxa_idp()) && reg & 2) { machine_is_pxa_idp()) && reg & 2) {
@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#if ! SMC_CAN_USE_16BIT #if ! SMC_CAN_USE_16BIT
/* #define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
* Any 16-bit access is performed with two 8-bit accesses if the hardware #define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
* can't do it directly. Most registers are 16-bit so those are mandatory.
*/
#define SMC_outw(x, ioaddr, reg) \
do { \
unsigned int __val16 = (x); \
SMC_outb( __val16, ioaddr, reg ); \
SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
} while (0)
#define SMC_inw(ioaddr, reg) \
({ \
unsigned int __val16; \
__val16 = SMC_inb( ioaddr, reg ); \
__val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
__val16; \
})
#define SMC_insw(a, r, p, l) BUG() #define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG() #define SMC_outsw(a, r, p, l) BUG()

View File

@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
DWCEQOS_MMC_CTRL_RSTONRD); DWCEQOS_MMC_CTRL_RSTONRD);
dwceqos_enable_mmc_interrupt(lp); dwceqos_enable_mmc_interrupt(lp);
/* Enable Interrupts */ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
DWCEQOS_DMA_CH0_IE_NIE |
DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
DWCEQOS_DMA_CH0_IE_AIE |
DWCEQOS_DMA_CH0_IE_FBEE);
dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@ -1905,6 +1899,15 @@ static int dwceqos_open(struct net_device *ndev)
netif_start_queue(ndev); netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet); tasklet_enable(&lp->tx_bdreclaim_tasklet);
/* Enable Interrupts -- do this only after we enable NAPI and the
* tasklet.
*/
dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
DWCEQOS_DMA_CH0_IE_NIE |
DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
DWCEQOS_DMA_CH0_IE_AIE |
DWCEQOS_DMA_CH0_IE_FBEE);
return 0; return 0;
} }

View File

@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
err = pci_enable_msi(pdev); err = pci_enable_msi(pdev);
if (err) if (err)
pr_err("Can't eneble msi. error is %d\n", err); pr_err("Can't enable msi. error is %d\n", err);
else else
nic->irq_type = IRQ_MSI; nic->irq_type = IRQ_MSI;
} else } else

View File

@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node); mac_address = of_get_mac_address(ofdev->dev.of_node);
if (mac_address) if (mac_address) {
/* Set the MAC address. */ /* Set the MAC address. */
memcpy(ndev->dev_addr, mac_address, ETH_ALEN); memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
else } else {
dev_warn(dev, "No MAC address found\n"); dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
}
/* Clear the Tx CSR's in case this is a restart */ /* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);

View File

@ -964,7 +964,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings, .get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats, .get_stats = kszphy_get_stats,
.suspend = genphy_suspend, .suspend = genphy_suspend,
.resume = genphy_resume, .resume = kszphy_resume,
}, { }, {
.phy_id = PHY_ID_KSZ8873MLL, .phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK, .phy_id_mask = MICREL_PHY_ID_MASK,

View File

@ -722,8 +722,10 @@ phy_err:
int phy_start_interrupts(struct phy_device *phydev) int phy_start_interrupts(struct phy_device *phydev)
{ {
atomic_set(&phydev->irq_disable, 0); atomic_set(&phydev->irq_disable, 0);
if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", if (request_irq(phydev->irq, phy_interrupt,
phydev) < 0) { IRQF_SHARED,
"phy_interrupt",
phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n", pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->mdio.bus->name, phydev->irq); phydev->mdio.bus->name, phydev->irq);
phydev->irq = PHY_POLL; phydev->irq = PHY_POLL;

View File

@ -14,9 +14,23 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/filter.h> #include <linux/filter.h>
#include <linux/if_team.h> #include <linux/if_team.h>
static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
struct sk_buff *skb)
{
if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
/* LACPDU packets should go to exact delivery */
const unsigned char *dest = eth_hdr(skb)->h_dest;
if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
return RX_HANDLER_EXACT;
}
return RX_HANDLER_ANOTHER;
}
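
lb_receive() steers LACPDUs to exact delivery: ETH_P_SLOW frames addressed to the Slow Protocols address 01:80:c2:00:00:02, one of the 01:80:c2:00:00:0X link-local group addresses that bridges must not forward. A userspace illustration of the destination test (is_link_local_ether_addr() matches 01:80:c2:00:00:00 through 0f):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char dest[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
	const unsigned char base[5] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

	/* link-local: first five bytes match and the high nibble of the
	 * last byte is zero; last byte 0x02 selects Slow Protocols (LACP).
	 */
	int link_local = !memcmp(dest, base, 5) && !(dest[5] & 0xf0);

	printf("exact delivery: %s\n",
	       link_local && dest[5] == 0x02 ? "yes" : "no");
	return 0;
}
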
struct lb_priv; struct lb_priv;
typedef struct team_port *lb_select_tx_port_func_t(struct team *, typedef struct team_port *lb_select_tx_port_func_t(struct team *,
@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = {
.port_enter = lb_port_enter, .port_enter = lb_port_enter,
.port_leave = lb_port_leave, .port_leave = lb_port_leave,
.port_disabled = lb_port_disabled, .port_disabled = lb_port_disabled,
.receive = lb_receive,
.transmit = lb_transmit, .transmit = lb_transmit,
}; };

View File

@ -894,11 +894,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop; goto drop;
if (skb->sk && sk_fullsock(skb->sk)) { skb_tx_timestamp(skb);
sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
&skb_shinfo(skb)->tx_flags);
sw_tx_timestamp(skb);
}
/* Orphan the skb - required as we might hang on to it /* Orphan the skb - required as we might hang on to it
* for indefinite time. * for indefinite time.

View File

@ -1009,6 +1009,7 @@ static int kaweth_probe(
struct net_device *netdev; struct net_device *netdev;
const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
int result = 0; int result = 0;
int rv = -EIO;
dev_dbg(dev, dev_dbg(dev,
"Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@ -1029,6 +1030,7 @@ static int kaweth_probe(
kaweth = netdev_priv(netdev); kaweth = netdev_priv(netdev);
kaweth->dev = udev; kaweth->dev = udev;
kaweth->net = netdev; kaweth->net = netdev;
kaweth->intf = intf;
spin_lock_init(&kaweth->device_lock); spin_lock_init(&kaweth->device_lock);
init_waitqueue_head(&kaweth->term_wait); init_waitqueue_head(&kaweth->term_wait);
@ -1048,6 +1050,10 @@ static int kaweth_probe(
/* Download the firmware */ /* Download the firmware */
dev_info(dev, "Downloading firmware...\n"); dev_info(dev, "Downloading firmware...\n");
kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
if (!kaweth->firmware_buf) {
rv = -ENOMEM;
goto err_free_netdev;
}
if ((result = kaweth_download_firmware(kaweth, if ((result = kaweth_download_firmware(kaweth,
"kaweth/new_code.bin", "kaweth/new_code.bin",
100, 100,
@ -1139,8 +1145,6 @@ err_fw:
dev_dbg(dev, "Initializing net device.\n"); dev_dbg(dev, "Initializing net device.\n");
kaweth->intf = intf;
kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->tx_urb) if (!kaweth->tx_urb)
goto err_free_netdev; goto err_free_netdev;
@ -1204,7 +1208,7 @@ err_only_tx:
err_free_netdev: err_free_netdev:
free_netdev(netdev); free_netdev(netdev);
return -EIO; return rv;
} }
/**************************************************************** /****************************************************************

View File

@ -914,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
{ {
struct Vmxnet3_TxDataDesc *tdd; struct Vmxnet3_TxDataDesc *tdd;
tdd = tq->data_ring.base + tq->tx_ring.next2fill; tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
tq->tx_ring.next2fill *
tq->txdata_desc_size);
memcpy(tdd->data, skb->data, ctx->copy_size); memcpy(tdd->data, skb->data, ctx->copy_size);
netdev_dbg(adapter->netdev, netdev_dbg(adapter->netdev,

View File

@ -69,10 +69,10 @@
/* /*
* Version numbers * Version numbers
*/ */
#define VMXNET3_DRIVER_VERSION_STRING "1.4.9.0-k" #define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01040900 #define VMXNET3_DRIVER_VERSION_NUM 0x01040a00
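
As the comment says, each byte of VMXNET3_DRIVER_VERSION_NUM is one dotted component of the version string, so 1.4.a.0 encodes as 0x01040a00. A quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int v = 0x01040a00;

	/* prints "1.4.a.0" */
	printf("%x.%x.%x.%x\n", (v >> 24) & 0xff, (v >> 16) & 0xff,
	       (v >> 8) & 0xff, v & 0xff);
	return 0;
}
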
#if defined(CONFIG_PCI_MSI) #if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */ /* RSS only makes sense if MSI-X is supported. */

View File

@ -15,6 +15,6 @@ struct nf_acct;
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name); struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct); void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
extern int nfnl_acct_overquota(const struct sk_buff *skb, int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
struct nf_acct *nfacct); struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */ #endif /* _NFNL_ACCT_H */

View File

@ -1,6 +1,16 @@
#ifndef __SMC91X_H__ #ifndef __SMC91X_H__
#define __SMC91X_H__ #define __SMC91X_H__
/*
* These bits define which access sizes a platform can support, rather
* than the maximal access size. So, if your platform can do 16-bit
* and 32-bit accesses to the SMC91x device, but not 8-bit, set both
* SMC91X_USE_16BIT and SMC91X_USE_32BIT.
*
* The SMC91x driver requires at least one of SMC91X_USE_8BIT or
* SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
* an invalid configuration.
*/
#define SMC91X_USE_8BIT (1 << 0) #define SMC91X_USE_8BIT (1 << 0)
#define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_16BIT (1 << 1)
#define SMC91X_USE_32BIT (1 << 2) #define SMC91X_USE_32BIT (1 << 2)
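
Given the semantics spelled out above, a board whose bus can do 8- and 16-bit but not 32-bit accesses would set both lower flags. A hypothetical board-file sketch; only the structure and flag names come from this header, the rest is assumed:

#include <linux/smc91x.h>

/* Hypothetical platform: 8- and 16-bit capable bus, no 32-bit. */
static struct smc91x_platdata my_board_smc91x_pdata = {
	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_NOWAIT,
};
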

View File

@ -1523,6 +1523,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
{ {
if (sk->sk_send_head == skb_unlinked) if (sk->sk_send_head == skb_unlinked)
sk->sk_send_head = NULL; sk->sk_send_head = NULL;
if (tcp_sk(sk)->highest_sack == skb_unlinked)
tcp_sk(sk)->highest_sack = NULL;
} }
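
The added lines keep tp->highest_sack from dangling when the skb it points at is unlinked. The invariant, in a minimal standalone sketch (the types here are stand-ins, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

/* Any cached pointer into a queue must be dropped when that element is
 * unlinked, or later users dereference freed memory.
 */
struct node { int seq; };

struct sock_state {
	struct node *send_head;
	struct node *highest_sack;   /* cached hint into the queue */
};

static void check_send_head(struct sock_state *s, struct node *unlinked)
{
	if (s->send_head == unlinked)
		s->send_head = NULL;
	if (s->highest_sack == unlinked)   /* the added check */
		s->highest_sack = NULL;
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));
	struct sock_state s = { n, n };

	check_send_head(&s, n);
	free(n);
	printf("%s\n", s.highest_sack ? "dangling!" : "safe");
	return 0;
}
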
static inline void tcp_init_send_head(struct sock *sk) static inline void tcp_init_send_head(struct sock *sk)

View File

@ -14,6 +14,7 @@
#include <linux/atmapi.h> #include <linux/atmapi.h>
#include <linux/atmioc.h> #include <linux/atmioc.h>
#include <linux/time.h>
#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
/* get pool statistics */ /* get pool statistics */

View File

@ -16,7 +16,8 @@
#define _UAPI__LINUX_IF_PPPOL2TP_H #define _UAPI__LINUX_IF_PPPOL2TP_H
#include <linux/types.h> #include <linux/types.h>
#include <linux/in.h>
#include <linux/in6.h>
/* Structure used to connect() the socket to a particular tunnel UDP /* Structure used to connect() the socket to a particular tunnel UDP
* socket over IPv4. * socket over IPv4.

View File

@ -21,8 +21,11 @@
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <linux/socket.h> #include <linux/socket.h>
#include <linux/if.h>
#include <linux/if_ether.h> #include <linux/if_ether.h>
#include <linux/if_pppol2tp.h> #include <linux/if_pppol2tp.h>
#include <linux/in.h>
#include <linux/in6.h>
/* For user-space programs to pick up these definitions /* For user-space programs to pick up these definitions
* which they wouldn't get otherwise without defining __KERNEL__ * which they wouldn't get otherwise without defining __KERNEL__

View File

@ -2,6 +2,9 @@
#define _UAPI_IF_TUNNEL_H_ #define _UAPI_IF_TUNNEL_H_
#include <linux/types.h> #include <linux/types.h>
#include <linux/if.h>
#include <linux/ip.h>
#include <linux/in6.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>

View File

@ -1,11 +1,13 @@
#ifndef _IPX_H_ #ifndef _IPX_H_
#define _IPX_H_ #define _IPX_H_
#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */
#include <linux/types.h> #include <linux/types.h>
#include <linux/sockios.h> #include <linux/sockios.h>
#include <linux/socket.h> #include <linux/socket.h>
#define IPX_NODE_LEN 6 #define IPX_NODE_LEN 6
#define IPX_MTU 576 #define IPX_MTU 576
#if __UAPI_DEF_SOCKADDR_IPX
struct sockaddr_ipx { struct sockaddr_ipx {
__kernel_sa_family_t sipx_family; __kernel_sa_family_t sipx_family;
__be16 sipx_port; __be16 sipx_port;
@ -14,6 +16,7 @@ struct sockaddr_ipx {
__u8 sipx_type; __u8 sipx_type;
unsigned char sipx_zero; /* 16 byte fill */ unsigned char sipx_zero; /* 16 byte fill */
}; };
#endif /* __UAPI_DEF_SOCKADDR_IPX */
/* /*
* So we can fit the extra info for SIOCSIFADDR into the address nicely * So we can fit the extra info for SIOCSIFADDR into the address nicely
@ -23,12 +26,15 @@ struct sockaddr_ipx {
#define IPX_DLTITF 0 #define IPX_DLTITF 0
#define IPX_CRTITF 1 #define IPX_CRTITF 1
#if __UAPI_DEF_IPX_ROUTE_DEFINITION
struct ipx_route_definition { struct ipx_route_definition {
__be32 ipx_network; __be32 ipx_network;
__be32 ipx_router_network; __be32 ipx_router_network;
unsigned char ipx_router_node[IPX_NODE_LEN]; unsigned char ipx_router_node[IPX_NODE_LEN];
}; };
#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
struct ipx_interface_definition { struct ipx_interface_definition {
__be32 ipx_network; __be32 ipx_network;
unsigned char ipx_device[16]; unsigned char ipx_device[16];
@ -45,16 +51,20 @@ struct ipx_interface_definition {
#define IPX_INTERNAL 2 #define IPX_INTERNAL 2
unsigned char ipx_node[IPX_NODE_LEN]; unsigned char ipx_node[IPX_NODE_LEN];
}; };
#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
#if __UAPI_DEF_IPX_CONFIG_DATA
struct ipx_config_data { struct ipx_config_data {
unsigned char ipxcfg_auto_select_primary; unsigned char ipxcfg_auto_select_primary;
unsigned char ipxcfg_auto_create_interfaces; unsigned char ipxcfg_auto_create_interfaces;
}; };
#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
/* /*
* OLD Route Definition for backward compatibility. * OLD Route Definition for backward compatibility.
*/ */
#if __UAPI_DEF_IPX_ROUTE_DEF
struct ipx_route_def { struct ipx_route_def {
__be32 ipx_network; __be32 ipx_network;
__be32 ipx_router_network; __be32 ipx_router_network;
@ -67,6 +77,7 @@ struct ipx_route_def {
#define IPX_RT_BLUEBOOK 2 #define IPX_RT_BLUEBOOK 2
#define IPX_RT_ROUTED 1 #define IPX_RT_ROUTED 1
}; };
#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE) #define SIOCAIPXITFCRT (SIOCPROTOPRIVATE)
#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1) #define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1)

View File

@ -139,6 +139,25 @@
#endif /* _NETINET_IN_H */ #endif /* _NETINET_IN_H */
/* Coordinate with glibc netipx/ipx.h header. */
#if defined(__NETIPX_IPX_H)
#define __UAPI_DEF_SOCKADDR_IPX 0
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 0
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 0
#define __UAPI_DEF_IPX_CONFIG_DATA 0
#define __UAPI_DEF_IPX_ROUTE_DEF 0
#else /* defined(__NETIPX_IPX_H) */
#define __UAPI_DEF_SOCKADDR_IPX 1
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
#define __UAPI_DEF_IPX_CONFIG_DATA 1
#define __UAPI_DEF_IPX_ROUTE_DEF 1
#endif /* defined(__NETIPX_IPX_H) */
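
With these guards, a program can include the glibc and kernel IPX headers together without duplicate-definition errors. A minimal sketch, assuming a glibc system that ships netipx/ipx.h:

/* glibc header first: defines __NETIPX_IPX_H and the IPX types. */
#include <netipx/ipx.h>
/* kernel header second: its duplicate definitions are now #if'd out. */
#include <linux/ipx.h>

int main(void)
{
	struct sockaddr_ipx sipx = { 0 };  /* single, unambiguous definition */
	return sipx.sipx_port;
}
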
/* Definitions for xattr.h */ /* Definitions for xattr.h */
#if defined(_SYS_XATTR_H) #if defined(_SYS_XATTR_H)
#define __UAPI_DEF_XATTR 0 #define __UAPI_DEF_XATTR 0
@ -179,6 +198,13 @@
#define __UAPI_DEF_IN6_PKTINFO 1 #define __UAPI_DEF_IN6_PKTINFO 1
#define __UAPI_DEF_IP6_MTUINFO 1 #define __UAPI_DEF_IP6_MTUINFO 1
/* Definitions for ipx.h */
#define __UAPI_DEF_SOCKADDR_IPX 1
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
#define __UAPI_DEF_IPX_CONFIG_DATA 1
#define __UAPI_DEF_IPX_ROUTE_DEF 1
/* Definitions for xattr.h */ /* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1 #define __UAPI_DEF_XATTR 1

View File

@ -583,7 +583,7 @@ enum ovs_userspace_attr {
#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
struct ovs_action_trunc { struct ovs_action_trunc {
uint32_t max_len; /* Max packet size in bytes. */ __u32 max_len; /* Max packet size in bytes. */
}; };
/** /**
@ -632,8 +632,8 @@ enum ovs_hash_alg {
* @hash_basis: basis used for computing hash. * @hash_basis: basis used for computing hash.
*/ */
struct ovs_action_hash { struct ovs_action_hash {
uint32_t hash_alg; /* One of ovs_hash_alg. */ __u32 hash_alg; /* One of ovs_hash_alg. */
uint32_t hash_basis; __u32 hash_basis;
}; };
/** /**

View File

@ -77,17 +77,18 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
size = min_t(unsigned int, size, tbl->size >> 1); size = min_t(unsigned int, size, tbl->size >> 1);
if (sizeof(spinlock_t) != 0) { if (sizeof(spinlock_t) != 0) {
tbl->locks = NULL;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
if (size * sizeof(spinlock_t) > PAGE_SIZE && if (size * sizeof(spinlock_t) > PAGE_SIZE &&
gfp == GFP_KERNEL) gfp == GFP_KERNEL)
tbl->locks = vmalloc(size * sizeof(spinlock_t)); tbl->locks = vmalloc(size * sizeof(spinlock_t));
else
#endif #endif
if (gfp != GFP_KERNEL) if (gfp != GFP_KERNEL)
gfp |= __GFP_NOWARN | __GFP_NORETRY; gfp |= __GFP_NOWARN | __GFP_NORETRY;
tbl->locks = kmalloc_array(size, sizeof(spinlock_t), if (!tbl->locks)
gfp); tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
gfp);
if (!tbl->locks) if (!tbl->locks)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < size; i++) for (i = 0; i < size; i++)
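
The leak fixed above came from unconditionally calling kmalloc_array() after a vmalloc() that may already have succeeded, overwriting (and leaking) the first allocation. The corrected pattern, as a userspace sketch with malloc()/calloc() standing in for vmalloc()/kmalloc_array():

#include <stdlib.h>
#include <stdio.h>

static void *alloc_locks(size_t n, size_t sz, int want_large)
{
	void *locks = NULL;               /* must start NULL ...          */

	if (want_large)
		locks = malloc(n * sz);   /* "vmalloc" attempt            */

	if (!locks)                       /* ...so a successful first try */
		locks = calloc(n, sz);    /* is never allocated over      */

	return locks;
}

int main(void)
{
	void *p = alloc_locks(128, sizeof(long), 1);

	printf("%s\n", p ? "allocated exactly once" : "failed");
	free(p);
	return 0;
}
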

View File

@ -250,7 +250,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_free_datagram(sk, skb); skb_free_datagram(sk, skb);
if (msg->msg_flags & MSG_TRUNC) if (flags & MSG_TRUNC)
copied = skblen; copied = skblen;
return err ? : copied; return err ? : copied;

View File

@ -262,6 +262,8 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
break; break;
} }
kfree_skb(hdev->req_skb);
hdev->req_skb = NULL;
hdev->req_status = hdev->req_result = 0; hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err); BT_DBG("%s end: err %d", hdev->name, err);

View File

@ -1091,7 +1091,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
skb_free_datagram(sk, skb); skb_free_datagram(sk, skb);
if (msg->msg_flags & MSG_TRUNC) if (flags & MSG_TRUNC)
copied = skblen; copied = skblen;
return err ? : copied; return err ? : copied;

View File

@ -32,6 +32,7 @@
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/crc16.h> #include <linux/crc16.h>
#include <linux/filter.h>
#include <net/bluetooth/bluetooth.h> #include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_core.h>
@ -5835,6 +5836,9 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
if (chan->sdu) if (chan->sdu)
break; break;
if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
break;
chan->sdu_len = get_unaligned_le16(skb->data); chan->sdu_len = get_unaligned_le16(skb->data);
skb_pull(skb, L2CAP_SDULEN_SIZE); skb_pull(skb, L2CAP_SDULEN_SIZE);
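
The added pskb_may_pull() guard ensures two bytes of SDU length are actually present before they are read. The same bounds rule in a standalone sketch:

#include <stdio.h>

/* Never read a 2-byte little-endian length from a buffer that may hold
 * fewer than 2 bytes.
 */
static int read_le16(const unsigned char *buf, unsigned long len,
		     unsigned int *out)
{
	if (len < 2)                 /* the pskb_may_pull() equivalent */
		return -1;
	*out = buf[0] | (buf[1] << 8);
	return 0;
}

int main(void)
{
	const unsigned char frame[1] = { 0x42 };   /* truncated SDU header */
	unsigned int sdu_len;

	printf("%s\n", read_le16(frame, sizeof(frame), &sdu_len) ?
	       "dropped truncated frame" : "parsed");
	return 0;
}
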
@ -6610,6 +6614,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
goto drop; goto drop;
} }
if ((chan->mode == L2CAP_MODE_ERTM ||
chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
goto drop;
if (!control->sframe) { if (!control->sframe) {
int err; int err;

View File

@ -1019,7 +1019,7 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
goto done; goto done;
if (pi->rx_busy_skb) { if (pi->rx_busy_skb) {
if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb)) if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
pi->rx_busy_skb = NULL; pi->rx_busy_skb = NULL;
else else
goto done; goto done;
@ -1270,7 +1270,17 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
goto done; goto done;
} }
err = sock_queue_rcv_skb(sk, skb); if (chan->mode != L2CAP_MODE_ERTM &&
chan->mode != L2CAP_MODE_STREAMING) {
/* Even if no filter is attached, we could potentially
* get errors from security modules, etc.
*/
err = sk_filter(sk, skb);
if (err)
goto done;
}
err = __sock_queue_rcv_skb(sk, skb);
/* For ERTM, handle one skb that doesn't fit into the recv /* For ERTM, handle one skb that doesn't fit into the recv
* buffer. This is important to do because the data frames * buffer. This is important to do because the data frames

View File

@ -249,7 +249,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
* index into the parent's child array. That is, they will be used to find * index into the parent's child array. That is, they will be used to find
* 'n' among tp's children. * 'n' among tp's children.
* *
* The bits from (n->pos + n->bits) to (tn->pos - 1) - "S" - are skipped bits * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
* for the node n. * for the node n.
* *
* All the bits we have seen so far are significant to the node n. The rest * All the bits we have seen so far are significant to the node n. The rest
@ -258,7 +258,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
* The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
* n's child array, and will of course be different for each child. * n's child array, and will of course be different for each child.
* *
* The rest of the bits, from 0 to (n->pos + n->bits), are completely unknown * The rest of the bits, from 0 to (n->pos - 1) - "u" - are completely unknown
* at this point. * at this point.
*/ */

View File

@ -73,9 +73,11 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb_dst_set(skb, &rt->dst); skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb))); memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
if (skb_iif && proto == IPPROTO_UDP) { if (skb_iif && !(df & htons(IP_DF))) {
/* Arrived from an ingress interface and got udp encapuslated. /* Arrived from an ingress interface, got encapsulated, with
* The encapsulated network segment length may exceed dst mtu. * fragmentation of encapsulating frames allowed.
* If skb is gso, the resulting encapsulated network segments
* may exceed dst mtu.
* Allow IP Fragmentation of segments. * Allow IP Fragmentation of segments.
*/ */
IPCB(skb)->flags |= IPSKB_FRAG_SEGS; IPCB(skb)->flags |= IPSKB_FRAG_SEGS;

View File

@ -3193,7 +3193,6 @@ int tcp_abort(struct sock *sk, int err)
local_bh_enable(); local_bh_enable();
return 0; return 0;
} }
sock_gen_put(sk);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@ -3222,7 +3221,6 @@ int tcp_abort(struct sock *sk, int err)
bh_unlock_sock(sk); bh_unlock_sock(sk);
local_bh_enable(); local_bh_enable();
release_sock(sk); release_sock(sk);
sock_put(sk);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(tcp_abort); EXPORT_SYMBOL_GPL(tcp_abort);

View File

@ -54,11 +54,16 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
{ {
struct net *net = sock_net(in_skb->sk); struct net *net = sock_net(in_skb->sk);
struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req); struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);
int err;
if (IS_ERR(sk)) if (IS_ERR(sk))
return PTR_ERR(sk); return PTR_ERR(sk);
return sock_diag_destroy(sk, ECONNABORTED); err = sock_diag_destroy(sk, ECONNABORTED);
sock_gen_put(sk);
return err;
} }
#endif #endif

View File

@ -814,8 +814,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
tcp_sk(sk)->snd_nxt; tcp_sk(sk)->snd_nxt;
/* RFC 7323 2.3
* The window field (SEG.WND) of every outgoing segment, with the
* exception of <SYN> segments, MUST be right-shifted by
* Rcv.Wind.Shift bits:
*/
tcp_v4_send_ack(sock_net(sk), skb, seq, tcp_v4_send_ack(sock_net(sk), skb, seq,
tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
tcp_time_stamp, tcp_time_stamp,
req->ts_recent, req->ts_recent,
0, 0,

View File

@ -1182,13 +1182,13 @@ out:
* @sk: socket * @sk: socket
* *
* Drops all bad checksum frames, until a valid one is found. * Drops all bad checksum frames, until a valid one is found.
* Returns the length of found skb, or 0 if none is found. * Returns the length of found skb, or -1 if none is found.
*/ */
static unsigned int first_packet_length(struct sock *sk) static int first_packet_length(struct sock *sk)
{ {
struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
struct sk_buff *skb; struct sk_buff *skb;
unsigned int res; int res;
__skb_queue_head_init(&list_kill); __skb_queue_head_init(&list_kill);
@ -1203,7 +1203,7 @@ static unsigned int first_packet_length(struct sock *sk)
__skb_unlink(skb, rcvq); __skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb); __skb_queue_tail(&list_kill, skb);
} }
res = skb ? skb->len : 0; res = skb ? skb->len : -1;
spin_unlock_bh(&rcvq->lock); spin_unlock_bh(&rcvq->lock);
if (!skb_queue_empty(&list_kill)) { if (!skb_queue_empty(&list_kill)) {
@ -1232,7 +1232,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
case SIOCINQ: case SIOCINQ:
{ {
unsigned int amount = first_packet_length(sk); int amount = max_t(int, 0, first_packet_length(sk));
return put_user(amount, (int __user *)arg); return put_user(amount, (int __user *)arg);
} }
@ -2184,7 +2184,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
/* Check for false positives due to checksum errors */ /* Check for false positives due to checksum errors */
if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
!(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
mask &= ~(POLLIN | POLLRDNORM); mask &= ~(POLLIN | POLLRDNORM);
return mask; return mask;
@ -2216,7 +2216,6 @@ struct proto udp_prot = {
.sysctl_wmem = &sysctl_udp_wmem_min, .sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min, .sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp_sock), .obj_size = sizeof(struct udp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table, .h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt, .compat_setsockopt = compat_udp_setsockopt,

View File

@ -55,7 +55,6 @@ struct proto udplite_prot = {
.unhash = udp_lib_unhash, .unhash = udp_lib_unhash,
.get_port = udp_v4_get_port, .get_port = udp_v4_get_port,
.obj_size = sizeof(struct udp_sock), .obj_size = sizeof(struct udp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udplite_table, .h.udp_table = &udplite_table,
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt, .compat_setsockopt = compat_udp_setsockopt,

View File

@ -1872,7 +1872,6 @@ static int addrconf_dad_end(struct inet6_ifaddr *ifp)
void addrconf_dad_failure(struct inet6_ifaddr *ifp) void addrconf_dad_failure(struct inet6_ifaddr *ifp)
{ {
struct in6_addr addr;
struct inet6_dev *idev = ifp->idev; struct inet6_dev *idev = ifp->idev;
struct net *net = dev_net(ifp->idev->dev); struct net *net = dev_net(ifp->idev->dev);
@ -1934,18 +1933,6 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
in6_ifa_put(ifp2); in6_ifa_put(ifp2);
lock_errdad: lock_errdad:
spin_lock_bh(&ifp->lock); spin_lock_bh(&ifp->lock);
} else if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
addr.s6_addr32[0] = htonl(0xfe800000);
addr.s6_addr32[1] = 0;
if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
ipv6_addr_equal(&ifp->addr, &addr)) {
/* DAD failed for link-local based on MAC address */
idev->cnf.disable_ipv6 = 1;
pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
}
} }
errdad: errdad:
@ -3821,6 +3808,7 @@ static void addrconf_dad_work(struct work_struct *w)
dad_work); dad_work);
struct inet6_dev *idev = ifp->idev; struct inet6_dev *idev = ifp->idev;
struct in6_addr mcaddr; struct in6_addr mcaddr;
bool disable_ipv6 = false;
enum { enum {
DAD_PROCESS, DAD_PROCESS,
@ -3837,6 +3825,24 @@ static void addrconf_dad_work(struct work_struct *w)
} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
action = DAD_ABORT; action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD; ifp->state = INET6_IFADDR_STATE_POSTDAD;
if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
struct in6_addr addr;
addr.s6_addr32[0] = htonl(0xfe800000);
addr.s6_addr32[1] = 0;
if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
ipv6_addr_equal(&ifp->addr, &addr)) {
/* DAD failed for link-local based on MAC */
idev->cnf.disable_ipv6 = 1;
pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
disable_ipv6 = true;
}
}
} }
spin_unlock_bh(&ifp->lock); spin_unlock_bh(&ifp->lock);
@ -3845,6 +3851,8 @@ static void addrconf_dad_work(struct work_struct *w)
goto out; goto out;
} else if (action == DAD_ABORT) { } else if (action == DAD_ABORT) {
addrconf_dad_stop(ifp, 1); addrconf_dad_stop(ifp, 1);
if (disable_ipv6)
addrconf_ifdown(idev->dev, 0);
goto out; goto out;
} }

View File

@@ -944,9 +944,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 	 */
+	/* RFC 7323 2.3
+	 * The window field (SEG.WND) of every outgoing segment, with the
+	 * exception of <SYN> segments, MUST be right-shifted by
+	 * Rcv.Wind.Shift bits:
+	 */
 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+			tcp_rsk(req)->rcv_nxt,
+			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
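
The shift matters: with a scale factor of, say, 7, advertising the unshifted rsk_rcv_wnd would overstate the offered window 128-fold. A toy check of the arithmetic (the values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t rcv_wnd = 262144;  /* hypothetical offered window, bytes */
        uint8_t  wscale  = 7;       /* negotiated Rcv.Wind.Shift */

        /* Per RFC 7323 2.3, non-SYN segments carry the window
         * right-shifted by the receive scale; the peer shifts it
         * back up on receipt.
         */
        uint16_t seg_wnd = (uint16_t)(rcv_wnd >> wscale);  /* 2048 on wire */
        uint32_t seen    = (uint32_t)seg_wnd << wscale;    /* 262144 at peer */

        printf("on wire: %u, reconstructed: %u\n", seg_wnd, seen);
        return 0;
    }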


@@ -1460,7 +1460,6 @@ struct proto udpv6_prot = {
 	.sysctl_wmem	   = &sysctl_udp_wmem_min,
 	.sysctl_rmem	   = &sysctl_udp_rmem_min,
 	.obj_size	   = sizeof(struct udp6_sock),
-	.slab_flags	   = SLAB_DESTROY_BY_RCU,
 	.h.udp_table	   = &udp_table,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_udpv6_setsockopt,


@@ -50,7 +50,6 @@ struct proto udplitev6_prot = {
 	.unhash		   = udp_lib_unhash,
 	.get_port	   = udp_v6_get_port,
 	.obj_size	   = sizeof(struct udp6_sock),
-	.slab_flags	   = SLAB_DESTROY_BY_RCU,
 	.h.udp_table	   = &udplite_table,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_udpv6_setsockopt,


@@ -856,7 +856,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
 	error = -ENOTCONN;
 	if (sk == NULL)
 		goto end;
-	if (sk->sk_state != PPPOX_CONNECTED)
+	if (!(sk->sk_state & PPPOX_CONNECTED))
 		goto end;
 
 	error = -EBADF;
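
sk_state here is a bitmask of PPPOX_* flags, so an equality test fails as soon as any other flag is set alongside the connected bit. A minimal illustration of the difference, with hypothetical flag values:

    #include <stdio.h>

    #define PPPOX_CONNECTED 0x01  /* hypothetical values for illustration */
    #define PPPOX_BOUND     0x02

    int main(void)
    {
        int sk_state = PPPOX_CONNECTED | PPPOX_BOUND;

        /* Equality wrongly rejects a connected-and-bound socket ... */
        printf("equality test: %s\n",
               sk_state == PPPOX_CONNECTED ? "connected" : "rejected");
        /* ... while the bitmask test accepts it. */
        printf("bitmask test:  %s\n",
               sk_state & PPPOX_CONNECTED ? "connected" : "rejected");
        return 0;
    }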


@@ -205,6 +205,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
 	const struct nf_conntrack_l3proto *l3proto;
 	const struct nf_conntrack_l4proto *l4proto;
+	struct net *net = seq_file_net(s);
 	int ret = 0;
 
 	NF_CT_ASSERT(ct);
@@ -215,6 +216,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	if (NF_CT_DIRECTION(hash))
 		goto release;
 
+	if (!net_eq(nf_ct_net(ct), net))
+		goto release;
+
 	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
 	NF_CT_ASSERT(l3proto);
 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));


@@ -326,14 +326,14 @@ static int nfnl_acct_try_del(struct nf_acct *cur)
 {
 	int ret = 0;
 
-	/* we want to avoid races with nfnl_acct_find_get. */
-	if (atomic_dec_and_test(&cur->refcnt)) {
+	/* We want to avoid races with nfnl_acct_put. So only when the current
+	 * refcnt is 1, we decrease it to 0.
+	 */
+	if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) {
 		/* We are protected by nfnl mutex. */
 		list_del_rcu(&cur->head);
 		kfree_rcu(cur, rcu_head);
 	} else {
-		/* still in use, restore reference counter. */
-		atomic_inc(&cur->refcnt);
 		ret = -EBUSY;
 	}
 	return ret;
@@ -443,7 +443,7 @@ void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_update);
 
-static void nfnl_overquota_report(struct nf_acct *nfacct)
+static void nfnl_overquota_report(struct net *net, struct nf_acct *nfacct)
 {
 	int ret;
 	struct sk_buff *skb;
@@ -458,11 +458,12 @@ static void nfnl_overquota_report(struct nf_acct *nfacct)
 		kfree_skb(skb);
 		return;
 	}
-	netlink_broadcast(init_net.nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
+	netlink_broadcast(net->nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
 			  GFP_ATOMIC);
 }
 
-int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+			struct nf_acct *nfacct)
 {
 	u64 now;
 	u64 *quota;
@@ -480,7 +481,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
 
 	if (now >= *quota &&
 	    !test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) {
-		nfnl_overquota_report(nfacct);
+		nfnl_overquota_report(net, nfacct);
 	}
 
 	return ret;
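
The cmpxchg turns "delete only if this is the last reference" into a single atomic step: the entry is torn down only when the count moves 1 to 0, so a concurrent getter that bumped it to 2 can never observe a half-deleted object, and there is no dec-then-reinc window. A userspace sketch of the same pattern with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct entry {
        atomic_int refcnt;
        /* ... payload ... */
    };

    /* Delete only when we are the sole owner: 1 -> 0 in one atomic
     * step. Unlike decrement-then-reincrement, the count never
     * touches 0 while another thread still holds a reference.
     */
    static bool try_del(struct entry *e)
    {
        int expected = 1;

        if (atomic_compare_exchange_strong(&e->refcnt, &expected, 0)) {
            /* Last reference: safe to unlink and (after an RCU
             * grace period, in the kernel) free.
             */
            return true;
        }
        return false;  /* still in use: -EBUSY in the code above */
    }

    int main(void)
    {
        struct entry e = { .refcnt = 2 };

        printf("refcnt=2: %s\n", try_del(&e) ? "deleted" : "busy");
        atomic_fetch_sub(&e.refcnt, 1);
        printf("refcnt=1: %s\n", try_del(&e) ? "deleted" : "busy");
        return 0;
    }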


@@ -330,16 +330,16 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
 {
 	int ret = 0;
 
-	/* we want to avoid races with nf_ct_timeout_find_get. */
-	if (atomic_dec_and_test(&timeout->refcnt)) {
+	/* We want to avoid races with ctnl_timeout_put. So only when the
+	 * current refcnt is 1, we decrease it to 0.
+	 */
+	if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) {
 		/* We are protected by nfnl mutex. */
 		list_del_rcu(&timeout->head);
 		nf_ct_l4proto_put(timeout->l4proto);
 		ctnl_untimeout(net, timeout);
 		kfree_rcu(timeout, rcu_head);
 	} else {
-		/* still in use, restore reference counter. */
-		atomic_inc(&timeout->refcnt);
 		ret = -EBUSY;
 	}
 	return ret;
@@ -543,7 +543,9 @@ err:
 
 static void ctnl_timeout_put(struct ctnl_timeout *timeout)
 {
-	atomic_dec(&timeout->refcnt);
+	if (atomic_dec_and_test(&timeout->refcnt))
+		kfree_rcu(timeout, rcu_head);
+
 	module_put(THIS_MODULE);
 }
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
@@ -591,7 +593,9 @@ static void __net_exit cttimeout_net_exit(struct net *net)
 
 	list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) {
 		list_del_rcu(&cur->head);
 		nf_ct_l4proto_put(cur->l4proto);
-		kfree_rcu(cur, rcu_head);
+
+		if (atomic_dec_and_test(&cur->refcnt))
+			kfree_rcu(cur, rcu_head);
 	}
 }
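
The matching release side: once try_del can leave a list-removed object alive because a getter still holds it, the final put must free it, so the put becomes dec-and-test rather than a plain decrement. A short sketch of that counterpart, continuing the C11 example above:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct timeout {
        atomic_int refcnt;
        /* ... payload ... */
    };

    /* Release a reference; the thread that drops the count to zero
     * frees the object (the kernel defers this through kfree_rcu so
     * concurrent RCU readers finish first).
     */
    static void timeout_put(struct timeout *t)
    {
        if (atomic_fetch_sub(&t->refcnt, 1) == 1)
            free(t);   /* we held the last reference */
    }

    int main(void)
    {
        struct timeout *t = malloc(sizeof(*t));

        atomic_init(&t->refcnt, 1);
        timeout_put(t);   /* last put frees */
        return 0;
    }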


@@ -1147,6 +1147,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
 MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
 MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
 MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
+MODULE_ALIAS_NF_LOGGER(3, 1); /* NFPROTO_ARP */
 
 module_init(nfnetlink_log_init);
 module_exit(nfnetlink_log_fini);


@@ -127,6 +127,8 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
 					    daddr, dport,
 					    in->ifindex);
 
+			if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+				sk = NULL;
 			/* NOTE: we return listeners even if bound to
 			 * 0.0.0.0, those are filtered out in
 			 * xt_socket, since xt_TPROXY needs 0 bound
@@ -195,6 +197,8 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
 					    daddr, ntohs(dport),
 					    in->ifindex);
 
+			if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+				sk = NULL;
 			/* NOTE: we return listeners even if bound to
 			 * 0.0.0.0, those are filtered out in
 			 * xt_socket, since xt_TPROXY needs 0 bound
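
atomic_inc_not_zero() is the standard way to take a reference on an object found under RCU: if the count already hit zero the socket is on its way to being freed, and the lookup must behave as if nothing was found. A hedged userspace sketch of the primitive's semantics:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Try to take a reference, but refuse to resurrect an object
     * whose count has already dropped to zero (it may be queued for
     * freeing). These are the semantics of atomic_inc_not_zero().
     */
    static bool inc_not_zero(atomic_int *refcnt)
    {
        int old = atomic_load(refcnt);

        while (old != 0) {
            if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                return true;  /* reference taken */
            /* old was reloaded by the failed CAS; retry */
        }
        return false;  /* object is dying: treat as not found */
    }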


@@ -26,7 +26,7 @@ static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 	nfnl_acct_update(skb, info->nfacct);
 
-	overquota = nfnl_acct_overquota(skb, info->nfacct);
+	overquota = nfnl_acct_overquota(par->net, skb, info->nfacct);
 
 	return overquota == NFACCT_UNDERQUOTA ? false : true;
 }


@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
 	u32 *tlv = (u32 *)(skbdata);
 	u16 totlen = nla_total_size(dlen);	/*alignment + hdr */
 	char *dptr = (char *)tlv + NLA_HDRLEN;
-	u32 htlv = attrtype << 16 | totlen;
+	u32 htlv = attrtype << 16 | dlen;
 
 	*tlv = htonl(htlv);
 	memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(ife_release_meta_gen);
 
 int ife_validate_meta_u32(void *val, int len)
 {
-	if (len == 4)
+	if (len == sizeof(u32))
 		return 0;
 
 	return -EINVAL;
@@ -144,8 +144,8 @@ EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
 
 int ife_validate_meta_u16(void *val, int len)
 {
-	/* length will include padding */
-	if (len == NLA_ALIGN(2))
+	/* length will not include padding */
+	if (len == sizeof(u16))
 		return 0;
 
 	return -EINVAL;
@@ -652,12 +652,14 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
 		u8 *tlvdata = (u8 *)tlv;
 		u16 mtype = tlv->type;
 		u16 mlen = tlv->len;
+		u16 alen;
 
 		mtype = ntohs(mtype);
 		mlen = ntohs(mlen);
+		alen = NLA_ALIGN(mlen);
 
-		if (find_decode_metaid(skb, ife, mtype, (mlen - 4),
-				       (void *)(tlvdata + 4))) {
+		if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
+				       (void *)(tlvdata + NLA_HDRLEN))) {
 			/* abuse overlimits to count when we receive metadata
 			 * but dont have an ops for it
 			 */
@@ -666,8 +668,8 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
 			ife->tcf_qstats.overlimits++;
 		}
 
-		tlvdata += mlen;
-		ifehdrln -= mlen;
+		tlvdata += alen;
+		ifehdrln -= alen;
 		tlv = (struct meta_tlvhdr *)tlvdata;
 	}
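
The decode fix separates two lengths: the TLV's stated length (header plus payload, no padding) and the aligned stride used to reach the next TLV. Advancing by the unaligned length would misparse any stream whose payloads are not multiples of 4. A simplified walker under those assumptions (a 4-byte header like NLA_HDRLEN, 4-byte alignment like NLA_ALIGN):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define HDRLEN 4                       /* type + len, like NLA_HDRLEN */
    #define ALIGN4(x) (((x) + 3u) & ~3u)   /* like NLA_ALIGN() */

    struct tlvhdr { uint16_t type, len; }; /* len covers header + payload */

    static void walk(const uint8_t *buf, size_t buflen)
    {
        while (buflen >= HDRLEN) {
            struct tlvhdr h;

            memcpy(&h, buf, HDRLEN);
            uint16_t mlen = ntohs(h.len);
            uint16_t alen = ALIGN4(mlen);  /* stride includes pad bytes */

            if (mlen < HDRLEN || alen > buflen)
                break;                     /* malformed stream */
            printf("type=%u payload=%u bytes\n",
                   ntohs(h.type), mlen - HDRLEN);
            buf += alen;                   /* step by the aligned length */
            buflen -= alen;
        }
    }

    int main(void)
    {
        /* one TLV: type 1, len 7 (4 hdr + 3 payload), padded to 8 */
        uint8_t buf[8] = { 0x00, 0x01, 0x00, 0x07, 'a', 'b', 'c', 0x00 };

        walk(buf, sizeof(buf));
        return 0;
    }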


@@ -643,18 +643,19 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 	struct Qdisc *sch;
 
 	if (!try_module_get(ops->owner))
-		goto errout;
+		return NULL;
 
 	sch = qdisc_alloc(dev_queue, ops);
-	if (IS_ERR(sch))
-		goto errout;
+	if (IS_ERR(sch)) {
+		module_put(ops->owner);
+		return NULL;
+	}
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
 		return sch;
 
 	qdisc_destroy(sch);
-errout:
 	return NULL;
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
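
The rule being enforced: every exit path after a successful try_module_get() must either hand the reference to a live object or drop it; the old shared errout label returned NULL without the put, pinning the qdisc module forever. A generic sketch of the pattern, with toy stand-ins for the module refcounting:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins for the kernel's module refcounting. */
    struct module { int refcnt; bool dying; };

    static bool try_module_get(struct module *m)
    {
        if (m->dying)
            return false;
        m->refcnt++;
        return true;
    }

    static void module_put(struct module *m) { m->refcnt--; }

    struct object { struct module *owner; };

    /* Every exit after a successful try_module_get() must either hand
     * the reference to the new object or drop it; returning early
     * without module_put() pins the module forever.
     */
    static struct object *object_create(struct module *owner, bool fail_alloc)
    {
        struct object *obj;

        if (!try_module_get(owner))
            return NULL;                 /* nothing acquired yet */

        obj = fail_alloc ? NULL : malloc(sizeof(*obj));
        if (!obj) {
            module_put(owner);           /* undo the get on failure */
            return NULL;
        }
        obj->owner = owner;              /* reference now owned by obj */
        return obj;
    }

    int main(void)
    {
        struct module m = { 0, false };

        object_create(&m, true);         /* simulate a failed create */
        printf("refcnt after failed create: %d\n", m.refcnt);  /* 0 */
        return 0;
    }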


@@ -119,7 +119,13 @@ int sctp_rcv(struct sk_buff *skb)
 		       skb_transport_offset(skb))
 		goto discard_it;
 
-	if (!pskb_may_pull(skb, sizeof(struct sctphdr)))
+	/* If the packet is fragmented and we need to do crc checking,
+	 * it's better to just linearize it otherwise crc computing
+	 * takes longer.
+	 */
+	if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
+	     skb_linearize(skb)) ||
+	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
 		goto discard_it;
 
 	/* Pull up the IP header. */
@@ -1177,9 +1183,6 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
 	if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
 		return NULL;
 
-	if (skb_linearize(skb))
-		return NULL;
-
 	ch = (sctp_chunkhdr_t *) skb->data;
 
 	/* The code below will attempt to walk the chunk and extract
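
The reasoning in the new comment: SCTP's checksum is CRC32c over the whole packet, which is cheapest on one contiguous buffer, so a fragmented skb is linearized up front (GSO-marked ones are checksummed per segment). For reference, a plain bitwise CRC32c over a linear buffer, a sketch rather than the kernel's optimized crc32c():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bit-at-a-time CRC32c (Castagnoli), reflected polynomial
     * 0x82F63B78; real implementations use lookup tables or the
     * SSE4.2 crc32 instruction instead.
     */
    static uint32_t crc32c(uint32_t crc, const uint8_t *buf, size_t len)
    {
        crc = ~crc;
        while (len--) {
            crc ^= *buf++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
        }
        return ~crc;
    }

    int main(void)
    {
        const char *msg = "123456789";

        /* The standard check value for CRC32c("123456789") is 0xe3069283. */
        printf("crc32c = 0x%08x\n",
               crc32c(0, (const uint8_t *)msg, strlen(msg)));
        return 0;
    }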


@@ -170,19 +170,6 @@ next_chunk:
 
 	chunk = list_entry(entry, struct sctp_chunk, list);
 
-	/* Linearize if it's not GSO */
-	if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
-	    skb_is_nonlinear(chunk->skb)) {
-		if (skb_linearize(chunk->skb)) {
-			__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
-			sctp_chunk_free(chunk);
-			goto next_chunk;
-		}
-
-		/* Update sctp_hdr as it probably changed */
-		chunk->sctp_hdr = sctp_hdr(chunk->skb);
-	}
-
 	if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
 		/* GSO-marked skbs but without frags, handle
 		 * them normally


@@ -424,11 +424,13 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
 		paddr.v4.sin_family = AF_INET;
 	} else {
 		laddr.v6.sin6_port = req->id.idiag_sport;
-		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src, 64);
+		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
+		       sizeof(laddr.v6.sin6_addr));
 		laddr.v6.sin6_family = AF_INET6;
 
 		paddr.v6.sin6_port = req->id.idiag_dport;
-		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst, 64);
+		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
+		       sizeof(paddr.v6.sin6_addr));
 		paddr.v6.sin6_family = AF_INET6;
 	}
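
The bug pattern fixed here is copying a fixed 64 bytes into a 16-byte struct in6_addr, overrunning the neighbouring fields; sizing the copy from the destination makes the code immune to such slips. A minimal before/after sketch with simplified stand-in types:

    #include <stdio.h>
    #include <string.h>

    struct in6_addr_like { unsigned char s6_addr[16]; };

    struct sockaddr_like {
        unsigned short       sin6_port;
        struct in6_addr_like sin6_addr;
        unsigned short       sin6_family;  /* sits right after the address */
    };

    int main(void)
    {
        unsigned char src[64] = { 0 };
        struct sockaddr_like laddr = { 0 };

        /* memcpy(&laddr.sin6_addr, src, 64);    BAD: 48-byte overrun,
         *                                       tramples sin6_family   */
        memcpy(&laddr.sin6_addr, src, sizeof(laddr.sin6_addr));  /* 16 bytes */

        printf("copied %zu bytes\n", sizeof(laddr.sin6_addr));
        return 0;
    }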


@@ -396,10 +396,13 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 	tuncfg.encap_destroy = NULL;
 	setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
 
-	if (enable_mcast(ub, remote))
+	err = enable_mcast(ub, remote);
+	if (err)
 		goto err;
 	return 0;
 err:
+	if (ub->ubsock)
+		udp_tunnel_sock_release(ub->ubsock);
 	kfree(ub);
 	return err;
 }
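
The fix completes the unwind: once the UDP tunnel socket exists, the error label has to release it before freeing the bearer, or the socket leaks on a failed multicast join. The shape of the pattern, sketched with stand-in state and cleanup functions:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the tunnel socket and bearer state. */
    struct sock   { int dummy; };
    struct bearer { struct sock *ubsock; };

    static void sock_release(struct sock *s)
    {
        free(s);
        puts("socket released");
    }

    static int bearer_enable(bool fail_mcast)
    {
        struct bearer *ub = calloc(1, sizeof(*ub));
        int err = -1;

        if (!ub)
            return err;

        ub->ubsock = malloc(sizeof(*ub->ubsock));  /* tunnel socket created */
        if (!ub->ubsock)
            goto err;

        if (fail_mcast)                            /* enable_mcast() failed */
            goto err;
        return 0;

    err:
        /* Unwind everything acquired so far, newest first. */
        if (ub->ubsock)
            sock_release(ub->ubsock);
        free(ub);
        return err;
    }

    int main(void)
    {
        printf("enable -> %d\n", bearer_enable(true));
        return 0;
    }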