Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix stack allocation in s390 BPF JIT, from Michael Holzheu.

 2) Disable LRO on openvswitch paths, from Jiri Benc.

 3) UDP early demux doesn't handle multicast group membership properly,
    fix from Shawn Bohrer.

 4) Fix TX queue hang due to incorrect handling of mixed size fragments
    and linearization in i40e driver, from Anjali Singhai Jain.

 5) Cannot use disable_irq() in timer handler of AMD xgbe driver, from
    Thomas Lendacky.

 6) be2net driver improperly assumes pci_alloc_consistent() gives
    zero'd out memory, use dma_zalloc_coherent().  From Sriharsha
    Basavapatna.

 7) Fix use-after-free in MPLS and ipv6, from Robert Shearman.

 8) Missing netif_napi_del() calls in cleanup paths of b44 driver, from
    Hauke Mehrtens.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: replace last open coded skb_orphan_frags with function call
  net: bcmgenet: power on MII block for all MII modes
  ipv6: Fix protocol resubmission
  ipv6: fix possible use after free of dev stats
  b44: call netif_napi_del()
  bridge: disable softirqs around br_fdb_update to avoid lockup
  Revert "bridge: use _bh spinlock variant for br_fdb_update to avoid lockup"
  mpls: fix possible use after free of device
  be2net: Replace dma/pci_alloc_coherent() calls with dma_zalloc_coherent()
  bridge: use _bh spinlock variant for br_fdb_update to avoid lockup
  amd-xgbe: Use disable_irq_nosync from within timer function
  rhashtable: add missing import <linux/export.h>
  i40e: Make sure to be in VEB mode if SRIOV is enabled at probe
  i40e: start up in VEPA mode by default
  i40e/i40evf: Fix mixed size frags and linearization
  ipv4/udp: Verify multicast group is ours in upd_v4_early_demux()
  openvswitch: disable LRO
  s390/bpf: fix bpf frame pointer setup
  s390/bpf: fix stack allocation
commit 5879ae5fd0
@@ -48,7 +48,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  * We get 160 bytes stack space from calling function, but only use
  * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
  */
-#define STK_OFF		(MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8))
+#define STK_SPACE	(MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_160_UNUSED	(160 - 11 * 8)
+#define STK_OFF		(STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
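For orientation, a minimal userspace sketch (not kernel code) of the arithmetic the new macros set up; the MAX_BPF_STACK value of 512 is an assumption taken from this era's linux/bpf.h:

    #include <stdio.h>

    #define MAX_BPF_STACK   512                     /* assumption: era value */
    #define STK_SPACE       (MAX_BPF_STACK + 8 + 4 + 4 + 160)
    #define STK_160_UNUSED  (160 - 11 * 8)
    #define STK_OFF         (STK_SPACE - STK_160_UNUSED)

    int main(void)
    {
            /* 72 of the caller's 160 save-area bytes are reusable, so the
             * prologue only has to allocate the remainder itself. */
            printf("STK_SPACE=%d STK_160_UNUSED=%d STK_OFF=%d\n",
                   STK_SPACE, STK_160_UNUSED, STK_OFF); /* 688, 72, 616 */
            return 0;
    }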
@@ -384,13 +384,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
 	}
 	/* Setup stack and backchain */
 	if (jit->seen & SEEN_STACK) {
-		/* lgr %bfp,%r15 (BPF frame pointer) */
-		EMIT4(0xb9040000, BPF_REG_FP, REG_15);
+		if (jit->seen & SEEN_FUNC)
+			/* lgr %w1,%r15 (backchain) */
+			EMIT4(0xb9040000, REG_W1, REG_15);
+		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
+		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
 		/* aghi %r15,-STK_OFF */
 		EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
 		if (jit->seen & SEEN_FUNC)
-			/* stg %bfp,152(%r15) (backchain) */
-			EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0,
+			/* stg %w1,152(%r15) (backchain) */
+			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 				      REG_15, 152);
 	}
 	/*
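Read together with the hunk's own comments, the prologue emitted when both SEEN_STACK and SEEN_FUNC are set now corresponds to roughly this s390 sequence (a reconstruction from the comments above, not disassembler output):

    lgr   %w1,%r15                    # remember the old stack pointer
    la    %bfp,STK_160_UNUSED(%r15)   # BPF FP reuses the unused save area
    aghi  %r15,-STK_OFF               # allocate the JIT frame
    stg   %w1,152(%r15)               # store the backchain for unwinders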
@@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data)
 	if (napi_schedule_prep(napi)) {
 		/* Disable Tx and Rx interrupts */
 		if (pdata->per_channel_irq)
-			disable_irq(channel->dma_irq);
+			disable_irq_nosync(channel->dma_irq);
 		else
 			xgbe_disable_rx_tx_ints(pdata);
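The one-liner matters because xgbe_tx_timer() runs in timer (softirq) context: disable_irq() waits for any in-flight handler to finish and may sleep, while disable_irq_nosync() only masks the line and returns. A hedged sketch of the safe pattern, with hypothetical names and the pre-timer_setup() timer signature of this era:

    #include <linux/interrupt.h>

    /* Runs from a timer, i.e. atomic context. */
    static void toy_tx_timer(unsigned long data)
    {
            unsigned int irq = (unsigned int)data;  /* hypothetical: irq stashed in data */

            /* disable_irq() could sleep waiting for a running handler and is
             * therefore forbidden here; the _nosync variant just masks. */
            disable_irq_nosync(irq);
    }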
@@ -2464,6 +2464,7 @@ err_out_powerdown:
 	ssb_bus_may_powerdown(sdev->bus);
 
 err_out_free_dev:
+	netif_napi_del(&bp->napi);
 	free_netdev(dev);
 
 out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
 		b44_unregister_phy_one(bp);
 	ssb_device_disable(sdev, 0);
 	ssb_bus_may_powerdown(sdev->bus);
+	netif_napi_del(&bp->napi);
 	free_netdev(dev);
 	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
 	ssb_set_drvdata(sdev, NULL);
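Both hunks enforce the same invariant: every netif_napi_add() done at probe time needs a matching netif_napi_del() on every path that ends in free_netdev(), or the NAPI instance stays registered after its memory is gone. A minimal sketch of the pairing, names hypothetical:

    #include <linux/netdevice.h>

    static void toy_teardown(struct net_device *dev, struct napi_struct *napi)
    {
            netif_napi_del(napi);   /* undo the probe-time netif_napi_add() */
            free_netdev(dev);       /* only free the netdev afterwards */
    }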
@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 			phy_name = "external RGMII (no delay)";
 		else
 			phy_name = "external RGMII (TX delay)";
-		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-		reg |= RGMII_MODE_EN | id_mode_dis;
-		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 		bcmgenet_sys_writel(priv,
 				    PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
 		break;
@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 		return -EINVAL;
 	}
 
+	/* This is an external PHY (xMII), so we need to enable the RGMII
+	 * block for the interface to work
+	 */
+	if (priv->ext_phy) {
+		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+		reg |= RGMII_MODE_EN | id_mode_dis;
+		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+	}
+
 	if (init)
 		dev_info(kdev, "configuring instance for %s\n", phy_name);
@@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 	total_size = buf_len;
 
 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
-	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-					      get_fat_cmd.size,
-					      &get_fat_cmd.dma);
+	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+					     get_fat_cmd.size,
+					     &get_fat_cmd.dma, GFP_ATOMIC);
 	if (!get_fat_cmd.va) {
 		dev_err(&adapter->pdev->dev,
 			"Memory allocation failure while reading FAT data\n");
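All of the be2net hunks that follow repeat one pattern, so a single note up front: dma_zalloc_coherent() returns zeroed memory, which is why explicit memset() calls disappear alongside the conversions, and GFP_ATOMIC is used where the allocation happens under a spinlock such as mcc_lock. Roughly how the helper was defined at the time in include/linux/dma-mapping.h (a sketch from memory, not a verbatim quote):

    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
    {
            /* __GFP_ZERO makes the coherent allocator clear the buffer */
            return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
    }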
@@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 		log_offset += buf_size;
 	}
 err:
-	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-			    get_fat_cmd.va, get_fat_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+			  get_fat_cmd.va, get_fat_cmd.dma);
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
 		return -EINVAL;
 
 	cmd.size = sizeof(struct be_cmd_resp_port_type);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
 		return -ENOMEM;
 	}
-	memset(cmd.va, 0, cmd.size);
 
 	spin_lock_bh(&adapter->mcc_lock);
 
@@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
 	}
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
-	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 	return status;
 }
 
@@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 		goto err;
 	}
 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 					 BE_SUPPORTED_SPEED_1GBPS;
 		}
 	}
-	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 
 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
-	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-					      &attribs_cmd.dma);
+	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+					     attribs_cmd.size,
+					     &attribs_cmd.dma, GFP_ATOMIC);
 	if (!attribs_cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
@@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 err:
 	mutex_unlock(&adapter->mbox_lock);
 	if (attribs_cmd.va)
-		pci_free_consistent(adapter->pdev, attribs_cmd.size,
-				    attribs_cmd.va, attribs_cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+				  attribs_cmd.va, attribs_cmd.dma);
 	return status;
 }
 
@@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
-	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-						   get_mac_list_cmd.size,
-						   &get_mac_list_cmd.dma);
+	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+						  get_mac_list_cmd.size,
+						  &get_mac_list_cmd.dma,
+						  GFP_ATOMIC);
 
 	if (!get_mac_list_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
 out:
 	spin_unlock_bh(&adapter->mcc_lock);
-	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
 	return status;
 }
 
@@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
-	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-				    &cmd.dma, GFP_KERNEL);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
@@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 err:
 	mutex_unlock(&adapter->mbox_lock);
 	if (cmd.va)
-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+				  cmd.dma);
 	return status;
 
 }
@@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-					     &extfat_cmd.dma);
+	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+					    extfat_cmd.size, &extfat_cmd.dma,
+					    GFP_ATOMIC);
 	if (!extfat_cmd.va)
 		return -ENOMEM;
 
@@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
 	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
 err:
-	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-			    extfat_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+			  extfat_cmd.dma);
 	return status;
 }
 
@@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 
 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-					     &extfat_cmd.dma);
+	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+					    extfat_cmd.size, &extfat_cmd.dma,
+					    GFP_ATOMIC);
 
 	if (!extfat_cmd.va) {
 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
 		}
 	}
-	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-			    extfat_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+			  extfat_cmd.dma);
 err:
 	return level;
 }
@@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 err:
 	mutex_unlock(&adapter->mbox_lock);
 	if (cmd.va)
-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+				  cmd.dma);
 	return status;
 }
 
@@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 		res->vf_if_cap_flags = vf_res->cap_flags;
 err:
 	if (cmd.va)
-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+				  cmd.dma);
 	return status;
 }
 
@@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 	status = be_cmd_notify_wait(adapter, &wrb);
 
 	if (cmd.va)
-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+				  cmd.dma);
 	return status;
 }
@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 	int status = 0;
 
 	read_cmd.size = LANCER_READ_FILE_CHUNK;
-	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-					   &read_cmd.dma);
+	read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
+					  &read_cmd.dma, GFP_ATOMIC);
 
 	if (!read_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 			break;
 		}
 	}
-	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-			    read_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
+			  read_cmd.dma);
 
 	return status;
 }
@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
 	};
 
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
-					   &ddrdma_cmd.dma, GFP_KERNEL);
+	ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+					    ddrdma_cmd.size, &ddrdma_cmd.dma,
+					    GFP_KERNEL);
 	if (!ddrdma_cmd.va)
 		return -ENOMEM;
 
@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
 
 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
-					   &eeprom_cmd.dma, GFP_KERNEL);
+	eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+					    eeprom_cmd.size, &eeprom_cmd.dma,
+					    GFP_KERNEL);
 
 	if (!eeprom_cmd.va)
 		return -ENOMEM;
@@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
 
 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
 				+ LANCER_FW_DOWNLOAD_CHUNK;
-	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
-					  &flash_cmd.dma, GFP_KERNEL);
+	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
+					   &flash_cmd.dma, GFP_KERNEL);
 	if (!flash_cmd.va)
 		return -ENOMEM;
 
@@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
 	}
 
 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
-	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
-					  GFP_KERNEL);
+	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+					   GFP_KERNEL);
 	if (!flash_cmd.va)
 		return -ENOMEM;
 
@@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter)
 	int status = 0;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
-						&mbox_mem_alloc->dma,
-						GFP_KERNEL);
+	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
+						 &mbox_mem_alloc->dma,
+						 GFP_KERNEL);
 	if (!mbox_mem_alloc->va)
 		return -ENOMEM;
 
 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
-	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
 	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
@@ -317,6 +317,7 @@ struct i40e_pf {
 #endif
 #define I40E_FLAG_PORT_ID_VALID		(u64)(1 << 28)
 #define I40E_FLAG_DCB_CAPABLE		(u64)(1 << 29)
+#define I40E_FLAG_VEB_MODE_ENABLED	BIT_ULL(40)
 
 	/* tracks features that get auto disabled by errors */
 	u64 auto_disable_flags;
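The new flag deliberately uses BIT_ULL() for bit 40: on a 32-bit int, 1 << 40 is undefined behavior, so a u64 flag word needs the ULL form. The same reasoning explains the later switch from (1 << __I40E_PF_RESET_REQUESTED) to BIT_ULL(__I40E_PF_RESET_REQUESTED) in i40e_main.c. A tiny sketch:

    #include <linux/bitops.h>

    /* (1 << 40) on a 32-bit int is undefined; BIT_ULL(40) is (1ULL << 40). */
    static const u64 toy_flag = BIT_ULL(40);  /* like I40E_FLAG_VEB_MODE_ENABLED */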
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			goto command_write_done;
 		}
 
+		/* By default we are in VEPA mode, if this is the first VF/VMDq
+		 * VSI to be added switch to VEB mode.
+		 */
+		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+			i40e_do_reset_safe(pf,
+					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
+		}
+
 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
 		if (vsi)
 			dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
@@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
 	if (ret)
 		goto end_reconstitute;
 
+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+		veb->bridge_mode = BRIDGE_MODE_VEB;
+	else
+		veb->bridge_mode = BRIDGE_MODE_VEPA;
 	i40e_config_bridge_mode(veb);
 
 	/* create the remaining VSIs attached to this VEB */
@@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
 		} else if (mode != veb->bridge_mode) {
 			/* Existing HW bridge but different mode needs reset */
 			veb->bridge_mode = mode;
-			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+			if (mode == BRIDGE_MODE_VEB)
+				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+			else
+				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 			break;
 		}
 	}
@@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		ctxt.uplink_seid = vsi->uplink_seid;
 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
 			ctxt.info.valid_sections |=
-				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 			ctxt.info.switch_id =
-				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+			     cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 		}
 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
 		break;
@@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 				 __func__);
 			return NULL;
 		}
+		/* We come up by default in VEPA mode if SRIOV is not
+		 * already enabled, in which case we can't force VEPA
+		 * mode.
+		 */
+		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+			veb->bridge_mode = BRIDGE_MODE_VEPA;
+			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+		}
 		i40e_config_bridge_mode(veb);
 	}
 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_switch_setup;
 	}
 
+#ifdef CONFIG_PCI_IOV
+	/* prep for VF support */
+	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+		if (pci_num_vf(pdev))
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+	}
+#endif
 	err = i40e_setup_pf_switch(pf, false);
 	if (err) {
 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
@@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb:      send buffer
  * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-			       const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
 	struct skb_frag_struct *frag;
 	bool linearize = false;
@@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 	gso_segs = skb_shinfo(skb)->gso_segs;
 
 	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-		u16 j = 1;
+		u16 j = 0;
 
 		if (num_frags < (I40E_MAX_BUFFER_TXD))
 			goto linearize_chk_done;
@@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 			goto linearize_chk_done;
 		}
 		frag = &skb_shinfo(skb)->frags[0];
-		size = hdr_len;
 		/* we might still have more fragments per segment */
 		do {
 			size += skb_frag_size(frag);
 			frag++; j++;
+			if ((size >= skb_shinfo(skb)->gso_size) &&
+			    (j < I40E_MAX_BUFFER_TXD)) {
+				size = (size % skb_shinfo(skb)->gso_size);
+				j = (size) ? 1 : 0;
+			}
 			if (j == I40E_MAX_BUFFER_TXD) {
-				if (size < skb_shinfo(skb)->gso_size) {
-					linearize = true;
-					break;
-				}
-				j = 1;
-				size -= skb_shinfo(skb)->gso_size;
-				if (size)
-					j++;
-				size += hdr_len;
+				linearize = true;
+				break;
 			}
 			num_frags--;
 		} while (num_frags);
@@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;
 
-	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+	if (i40e_chk_linearize(skb, tx_flags))
 		if (skb_linearize(skb))
 			goto out_drop;
 
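The hardware cannot scatter-gather more than I40E_MAX_BUFFER_TXD (8) buffers per on-wire packet, and the old check both seeded the walk wrong (j = 1, size = hdr_len) and tried to keep going after hitting the limit instead of linearizing. A userspace model of the corrected walk, assuming a flat array of fragment sizes stands in for skb_shinfo(skb)->frags (the i40evf copy below is the same logic):

    #include <stdbool.h>
    #include <stddef.h>

    #define I40E_MAX_BUFFER_TXD 8

    /* Returns true when some TSO segment would need 8 or more buffers. */
    static bool toy_chk_linearize(const size_t *frag_size, int num_frags,
                                  size_t gso_size)
    {
            size_t size = 0;
            int j = 0;

            while (num_frags--) {
                    size += *frag_size++;
                    j++;
                    /* crossing a gso_size boundary starts a new segment, so
                     * the per-segment buffer count resets to the spill-over */
                    if (size >= gso_size && j < I40E_MAX_BUFFER_TXD) {
                            size %= gso_size;
                            j = size ? 1 : 0;
                    }
                    if (j == I40E_MAX_BUFFER_TXD)
                            return true;    /* caller must skb_linearize() */
            }
            return false;
    }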
@@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
-	if (num_vfs)
+	if (num_vfs) {
+		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+			i40e_do_reset_safe(pf,
+					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
+		}
 		return i40e_pci_sriov_enable(pdev, num_vfs);
+	}
 
 	if (!pci_vfs_assigned(pf->pdev)) {
 		i40e_free_vfs(pf);
+		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 	} else {
 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
 		return -EINVAL;
@@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb:      send buffer
  * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-			       const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
 	struct skb_frag_struct *frag;
 	bool linearize = false;
@@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 	gso_segs = skb_shinfo(skb)->gso_segs;
 
 	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-		u16 j = 1;
+		u16 j = 0;
 
 		if (num_frags < (I40E_MAX_BUFFER_TXD))
 			goto linearize_chk_done;
@@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 			goto linearize_chk_done;
 		}
 		frag = &skb_shinfo(skb)->frags[0];
-		size = hdr_len;
 		/* we might still have more fragments per segment */
 		do {
 			size += skb_frag_size(frag);
 			frag++; j++;
+			if ((size >= skb_shinfo(skb)->gso_size) &&
+			    (j < I40E_MAX_BUFFER_TXD)) {
+				size = (size % skb_shinfo(skb)->gso_size);
+				j = (size) ? 1 : 0;
+			}
 			if (j == I40E_MAX_BUFFER_TXD) {
-				if (size < skb_shinfo(skb)->gso_size) {
-					linearize = true;
-					break;
-				}
-				j = 1;
-				size -= skb_shinfo(skb)->gso_size;
-				if (size)
-					j++;
-				size += hdr_len;
+				linearize = true;
+				break;
 			}
 			num_frags--;
 		} while (num_frags);
@@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (tso)
 		tx_flags |= I40E_TX_FLAGS_TSO;
 
-	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+	if (i40e_chk_linearize(skb, tx_flags))
 		if (skb_linearize(skb))
 			goto out_drop;
 
@@ -26,6 +26,7 @@
 #include <linux/random.h>
 #include <linux/rhashtable.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
@@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
 	int err = 0;
 
 	if (ndm->ndm_flags & NTF_USE) {
+		local_bh_disable();
 		rcu_read_lock();
 		br_fdb_update(p->br, p, addr, vid, true);
 		rcu_read_unlock();
+		local_bh_enable();
 	} else {
 		spin_lock_bh(&p->br->hash_lock);
 		err = fdb_add_entry(p, addr, ndm->ndm_state,
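Context for the hunk: br_fdb_update() takes hash_lock with a plain spin_lock(), and the same lock is taken from the packet-processing softirq, so being interrupted by that softirq while holding the lock deadlocks the CPU. (The reverted commit instead used spin_lock_bh() inside br_fdb_update(), which broke the callers that already run with BHs disabled.) A generic sketch of the pattern, names hypothetical:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(toy_hash_lock);  /* also taken in softirq context */

    static void toy_update_from_process_context(void)
    {
            local_bh_disable();             /* keep the softirq user off this CPU */
            spin_lock(&toy_hash_lock);
            /* ... update the table ... */
            spin_unlock(&toy_hash_lock);
            local_bh_enable();
    }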
@@ -1718,15 +1718,8 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-			atomic_long_inc(&dev->rx_dropped);
-			kfree_skb(skb);
-			return NET_RX_DROP;
-		}
-	}
-
-	if (unlikely(!is_skb_forwardable(dev, skb))) {
+	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+	    unlikely(!is_skb_forwardable(dev, skb))) {
 		atomic_long_inc(&dev->rx_dropped);
 		kfree_skb(skb);
 		return NET_RX_DROP;
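The open-coded SKBTX_DEV_ZEROCOPY block is exactly what skb_orphan_frags() encapsulates, hence the shortlog entry "replace last open coded skb_orphan_frags with function call". From memory, the include/linux/skbuff.h helper of this era looked roughly like this (a sketch, not a verbatim quote):

    /* Copy userspace (zerocopy) fragments into kernel memory so forwarding
     * the skb no longer pins the sender's pages. */
    static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
    {
            if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
                    return 0;
            return skb_copy_ubufs(skb, gfp_mask);
    }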
@@ -90,6 +90,7 @@
 #include <linux/socket.h>
 #include <linux/sockios.h>
+#include <linux/igmp.h>
 #include <linux/inetdevice.h>
 #include <linux/in.h>
 #include <linux/errno.h>
 #include <linux/timer.h>
@@ -1960,6 +1961,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
 	struct sock *sk;
 	struct dst_entry *dst;
 	int dif = skb->dev->ifindex;
+	int ours;
 
 	/* validate the packet */
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
@@ -1969,14 +1971,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
 	uh = udp_hdr(skb);
 
 	if (skb->pkt_type == PACKET_BROADCAST ||
-	    skb->pkt_type == PACKET_MULTICAST)
+	    skb->pkt_type == PACKET_MULTICAST) {
+		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+
+		if (!in_dev)
+			return;
+
+		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+				       iph->protocol);
+		if (!ours)
+			return;
 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
 						   uh->source, iph->saddr, dif);
-	else if (skb->pkt_type == PACKET_HOST)
+	} else if (skb->pkt_type == PACKET_HOST) {
 		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
 					     uh->source, iph->saddr, dif);
-	else
+	} else {
 		return;
+	}
 
 	if (!sk)
 		return;
@@ -133,6 +133,14 @@ static void snmp6_free_dev(struct inet6_dev *idev)
 	free_percpu(idev->stats.ipv6);
 }
 
+static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
+{
+	struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu);
+
+	snmp6_free_dev(idev);
+	kfree(idev);
+}
+
 /* Nobody refers to this device, we may destroy it. */
 
 void in6_dev_finish_destroy(struct inet6_dev *idev)
@@ -151,7 +159,6 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
 		pr_warn("Freeing alive inet6 device %p\n", idev);
 		return;
 	}
-	snmp6_free_dev(idev);
-	kfree_rcu(idev, rcu);
+	call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu);
 }
 EXPORT_SYMBOL(in6_dev_finish_destroy);
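The bug pattern here is worth spelling out: kfree_rcu() can only defer the kfree() of the object itself, so the immediate snmp6_free_dev() freed the percpu stats while RCU readers could still dereference them. When extra resources must outlive the grace period too, the whole free has to move into a call_rcu() callback, as the hunk does. Generic shape, names hypothetical:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct toy_obj {
            void *extra;            /* e.g. percpu stats */
            struct rcu_head rcu;
    };

    static void toy_obj_free_rcu(struct rcu_head *head)
    {
            struct toy_obj *o = container_of(head, struct toy_obj, rcu);

            kfree(o->extra);        /* now safe: all RCU readers are done */
            kfree(o);
    }

    /* instead of: kfree(o->extra); kfree_rcu(o, rcu); */
    static void toy_obj_destroy(struct toy_obj *o)
    {
            call_rcu(&o->rcu, toy_obj_free_rcu);
    }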
@@ -212,13 +212,13 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
 	 */
 
 	rcu_read_lock();
-resubmit:
 	idev = ip6_dst_idev(skb_dst(skb));
 	if (!pskb_pull(skb, skb_transport_offset(skb)))
 		goto discard;
 	nhoff = IP6CB(skb)->nhoff;
 	nexthdr = skb_network_header(skb)[nhoff];
 
+resubmit:
 	raw = raw6_local_deliver(skb, nexthdr);
 	ipprot = rcu_dereference(inet6_protos[nexthdr]);
 	if (ipprot) {
@@ -246,10 +246,12 @@ resubmit:
 			goto discard;
 
 		ret = ipprot->handler(skb);
-		if (ret > 0)
+		if (ret < 0) {
+			nexthdr = -ret;
 			goto resubmit;
-		else if (ret == 0)
+		} else if (ret == 0) {
 			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
+		}
 	} else {
 		if (!raw) {
 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
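Two things change here: the resubmit label moves below the nhoff/nexthdr computation so a resubmitted packet is not re-parsed from the original extension-header offset, and the handler return convention becomes "return -N to resubmit as protocol N" instead of the old "any positive value means resubmit". A hypothetical decapsulating handler under the new convention:

    #include <linux/in.h>
    #include <linux/skbuff.h>

    /* Hypothetical inet6 protocol handler that strips an outer header and
     * asks ip6_input_finish() to continue with the inner protocol. */
    static int toy_decap_handler(struct sk_buff *skb)
    {
            /* ... remove the outer header, pull skb to the inner one ... */
            return -IPPROTO_TCP;    /* assumption: caller sets nexthdr = -ret */
    }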
@@ -541,7 +541,7 @@ static void mpls_ifdown(struct net_device *dev)
 
 	RCU_INIT_POINTER(dev->mpls_ptr, NULL);
 
-	kfree(mdev);
+	kfree_rcu(mdev, rcu);
 }
 
 static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
@@ -16,6 +16,7 @@ struct mpls_dev {
 	int			input_enabled;
 
 	struct ctl_table_header *sysctl;
+	struct rcu_head		rcu;
 };
 
 struct sk_buff;
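The pairing of these two mpls hunks shows the mechanics of kfree_rcu(): it needs a struct rcu_head embedded in the object, which is exactly what the internal.h hunk adds, and it then frees the device only after readers that obtained mdev under rcu_read_lock() have finished. In miniature, with hypothetical names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct toy_mpls_dev {
            int input_enabled;
            struct rcu_head rcu;    /* storage kfree_rcu() needs */
    };

    static void toy_mpls_ifdown(struct toy_mpls_dev *mdev)
    {
            /* kfree(mdev) here would race with RCU readers; this defers it */
            kfree_rcu(mdev, rcu);
    }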
@@ -125,6 +125,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
 	if (err)
 		goto error_master_upper_dev_unlink;
 
+	dev_disable_lro(netdev_vport->dev);
 	dev_set_promiscuity(netdev_vport->dev, 1);
 	netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
 	rtnl_unlock();
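And the openvswitch fix: LRO-merged super-packets cannot be re-segmented on the output path, so any port attached to a forwarding datapath must have LRO off, which dev_disable_lro() does (the bridge code takes the same precaution). Sketch of where the call sits, wrapper name hypothetical:

    #include <linux/netdevice.h>

    static void toy_ovs_port_attach(struct net_device *dev)
    {
            dev_disable_lro(dev);           /* forwarded traffic must not be LRO-merged */
            dev_set_promiscuity(dev, 1);    /* see every frame on the wire */
    }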