Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) GTP fixes from Andreas Schultz (missing genl module alias, clear IP
    DF on transmit).

 2) Netfilter needs to reflect the fwmark when sending resets, from Pau
    Espin Pedrol.

 3) nftables dump OOPS fix from Liping Zhang.

 4) Fix erroneous setting of VIRTIO_NET_HDR_F_DATA_VALID on transmit,
    from Rolf Neugebauer.

 5) Fix build error of ipt_CLUSTERIP when procfs is disabled, from Arnd
    Bergmann.

 6) Fix regression in handling of NETIF_F_SG in harmonize_features(),
    from Eric Dumazet.

 7) Fix RTNL deadlock wrt. lwtunnel module loading, from David Ahern.

 8) tcp_fastopen_create_child() needs to setup tp->max_window, from
    Alexey Kodanev.

 9) Missing kmemdup() failure check in ipv6 segment routing code, from
    Eric Dumazet.

10) Don't execute unix_bind() under the bindlock, otherwise we deadlock
    with splice. From WANG Cong.

11) ip6_tnl_parse_tlv_enc_lim() potentially reallocates the skb buffer,
    therefore callers must reload cached header pointers into that skb.
    Fix from Eric Dumazet.

12) Fix various bugs in legacy IRQ fallback handling in alx driver,
    from Tobias Regnery.

13) Do not allow lwtunnel drivers to be unloaded while they are
    referenced by active instances, from Robert Shearman.

14) Fix truncated PHY LED trigger names, from Geert Uytterhoeven.

15) Fix a few regressions from virtio_net XDP support, from John
    Fastabend and Jakub Kicinski.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (102 commits)
  ISDN: eicon: silence misleading array-bounds warning
  net: phy: micrel: add support for KSZ8795
  gtp: fix cross netns recv on gtp socket
  gtp: clear DF bit on GTP packet tx
  gtp: add genl family modules alias
  tcp: don't annotate mark on control socket from tcp_v6_send_response()
  ravb: unmap descriptors when freeing rings
  virtio_net: reject XDP programs using header adjustment
  virtio_net: use dev_kfree_skb for small buffer XDP receive
  r8152: check rx after napi is enabled
  r8152: re-schedule napi for tx
  r8152: avoid start_xmit to schedule napi when napi is disabled
  r8152: avoid start_xmit to call napi_schedule during autosuspend
  net: dsa: Bring back device detaching in dsa_slave_suspend()
  net: phy: leds: Fix truncated LED trigger names
  net: phy: leds: Break dependency of phy.h on phy_led_triggers.h
  net: phy: leds: Clear phy_num_led_triggers on failure to avoid crash
  net-next: ethernet: mediatek: change the compatible string
  Documentation: devicetree: change the mediatek ethernet compatible string
  bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status().
  ...
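A note on item 11, since the same bug pattern recurs: any helper that may
call pskb_may_pull() can reallocate skb->head, leaving previously cached
header pointers dangling. A minimal sketch of the safe calling pattern, in
the spirit of the ip6_tnl fix (variable names illustrative, not the exact
tunnel driver code):

	struct ipv6hdr *ipv6h = ipv6_hdr(skb);	/* cached before the call */
	int offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	if (offset > 0) {
		/* the parse above may have expanded or copied skb->head,
		 * so any header pointer taken earlier must be reloaded
		 */
		ipv6h = ipv6_hdr(skb);
	}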
commit 1b1bc42c16
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
 	order. These are fe_int0, fe_int1 and fe_int2.

@@ -19,8 +19,9 @@ Optional Properties:
   specifications. If neither of these are specified, the default is to
   assume clause 22.
 
-  If the phy's identifier is known then the list may contain an entry
-  of the form: "ethernet-phy-idAAAA.BBBB" where
+  If the PHY reports an incorrect ID (or none at all) then the
+  "compatible" list may contain an entry with the correct PHY ID in the
+  form: "ethernet-phy-idAAAA.BBBB" where
      AAAA - The value of the 16 bit Phy Identifier 1 register as
             4 hex digits. This is the chip vendor OUI bits 3:18
      BBBB - The value of the 16 bit Phy Identifier 2 register as

@@ -3567,7 +3567,7 @@ F:	drivers/infiniband/hw/cxgb3/
 F:	include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Hariprasad S <hariprasad@chelsio.com>
+M:	Ganesh Goudar <ganeshgr@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported

@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
 				((CAPI_MSG *) msg)->header.ncci = 0;
 				((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
 				((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
-				PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
+				((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
+				((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
 				((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
 				w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
 				if (w != _QUEUE_FULL)

@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
 
+	dev->irq = pdev->irq;
 	priv->base = addr;
 	priv->device = &pdev->dev;
 
 	if (!c_can_pci_data->freq) {
 		dev_err(&pdev->dev, "no clock frequency defined\n");

@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
 	netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
 		HECC_DEF_NAPI_WEIGHT);
 
-	clk_enable(priv->clk);
+	err = clk_prepare_enable(priv->clk);
+	if (err) {
+		dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+		goto probe_exit_clk;
+	}
+
 	err = register_candev(ndev);
 	if (err) {
 		dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
 	struct ti_hecc_priv *priv = netdev_priv(ndev);
 
 	unregister_candev(ndev);
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 	clk_put(priv->clk);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
 	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
 	priv->can.state = CAN_STATE_SLEEPING;
 
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return 0;
 }
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct ti_hecc_priv *priv = netdev_priv(dev);
+	int err;
 
-	clk_enable(priv->clk);
+	err = clk_prepare_enable(priv->clk);
+	if (err)
+		return err;
 
 	hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;

@@ -891,6 +891,8 @@
 #define PCS_V1_WINDOW_SELECT		0x03fc
 #define PCS_V2_WINDOW_DEF		0x9060
 #define PCS_V2_WINDOW_SELECT		0x9064
+#define PCS_V2_RV_WINDOW_DEF		0x1060
+#define PCS_V2_RV_WINDOW_SELECT		0x1064
 
 /* PCS register entry bit positions and sizes */
 #define PCS_V2_WINDOW_DEF_OFFSET_INDEX	6

@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
-	XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
 	mmd_data = XPCS16_IOREAD(pdata, offset);
 	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
-	XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
 	XPCS16_IOWRITE(pdata, offset, mmd_data);
 	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 }
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 
 	/* Flush Tx queues */
 	ret = xgbe_flush_tx_queues(pdata);
-	if (ret)
+	if (ret) {
+		netdev_err(pdata->netdev, "error flushing TX queues\n");
 		return ret;
+	}
 
 	/*
 	 * Initialize DMA related features

@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 	DBGPR("-->xgbe_start\n");
 
-	hw_if->init(pdata);
+	ret = hw_if->init(pdata);
+	if (ret)
+		return ret;
 
 	xgbe_napi_enable(pdata, 1);
 

@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct xgbe_prv_data *pdata;
 	struct device *dev = &pdev->dev;
 	void __iomem * const *iomap_table;
+	struct pci_dev *rdev;
 	unsigned int ma_lo, ma_hi;
 	unsigned int reg;
 	int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (netif_msg_probe(pdata))
 		dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
 
+	/* Set the PCS indirect addressing definition registers */
+	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+	if (rdev &&
+	    (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+	} else {
+		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+	}
+	pci_dev_put(rdev);
+
 	/* Configure the PCS indirect addressing support */
-	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
 	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
 	pdata->xpcs_window <<= 6;
 	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);

@@ -955,6 +955,8 @@ struct xgbe_prv_data {
 
 	/* XPCS indirect addressing lock */
 	spinlock_t xpcs_lock;
+	unsigned int xpcs_window_def_reg;
+	unsigned int xpcs_window_sel_reg;
 	unsigned int xpcs_window;
 	unsigned int xpcs_window_size;
 	unsigned int xpcs_window_mask;

@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
 		return -ENOMEM;
 	}
 
-	alx_reinit_rings(alx);
-
 	return 0;
 }
 
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
 	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
 		kfree(alx->qnapi[0]->rxq->bufs);
 
-	if (!alx->descmem.virt)
+	if (alx->descmem.virt)
 		dma_free_coherent(&alx->hw.pdev->dev,
 				  alx->descmem.size,
 				  alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
 	alx_free_rings(alx);
 	alx_free_napis(alx);
 	alx_disable_advanced_intr(alx);
+	alx_init_intr(alx, false);
 
 	err = alx_alloc_napis(alx);
 	if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
 	if (err)
 		goto out_free_rings;
 
+	/* must be called after alx_request_irq because the chip stops working
+	 * if we copy the dma addresses in alx_init_ring_ptrs twice when
+	 * requesting msi-x interrupts failed
+	 */
+	alx_reinit_rings(alx);
+
 	netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
 	netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
 

@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
 		priv->old_link = 0;
 		priv->old_duplex = -1;
 		priv->old_pause = -1;
+	} else {
+		phydev = NULL;
 	}
 
 	/* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
 			 ENETDMAC_IRMASK, priv->tx_chan);
 
-	if (priv->has_phy)
+	if (phydev)
 		phy_start(phydev);
 	else
 		bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
 	free_irq(dev->irq, dev);
 
 out_phy_disconnect:
-	if (priv->has_phy)
+	if (phydev)
 		phy_disconnect(phydev);
 
 	return ret;

@@ -1099,7 +1099,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
 {
 #ifdef CONFIG_INET
 	struct tcphdr *th;
-	int len, nw_off, tcp_opt_len;
+	int len, nw_off, tcp_opt_len = 0;
 
 	if (tcp_ts)
 		tcp_opt_len = 12;
@@ -5314,17 +5314,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 	if ((link_info->support_auto_speeds | diff) !=
 	    link_info->support_auto_speeds) {
 		/* An advertised speed is no longer supported, so we need to
-		 * update the advertisement settings. See bnxt_reset() for
-		 * comments about the rtnl_lock() sequence below.
+		 * update the advertisement settings. Caller holds RTNL
+		 * so we can modify link settings.
 		 */
-		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-		rtnl_lock();
 		link_info->advertising = link_info->support_auto_speeds;
-		if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
-		    (link_info->autoneg & BNXT_AUTONEG_SPEED))
+		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
 			bnxt_hwrm_set_link_setting(bp, true, false);
-		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-		rtnl_unlock();
 	}
 	return 0;
 }
@@ -6200,29 +6195,37 @@ bnxt_restart_timer:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
 {
-	/* bnxt_reset_task() calls bnxt_close_nic() which waits
-	 * for BNXT_STATE_IN_SP_TASK to clear.
-	 * If there is a parallel dev_close(), bnxt_close() may be holding
+	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+	 * set. If the device is being closed, bnxt_close() may be holding
 	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
 	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
 	 */
 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	rtnl_lock();
-	if (test_bit(BNXT_STATE_OPEN, &bp->state))
-		bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	rtnl_unlock();
 }
 
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+	bnxt_rtnl_lock_sp(bp);
+	if (test_bit(BNXT_STATE_OPEN, &bp->state))
+		bnxt_reset_task(bp, silent);
+	bnxt_rtnl_unlock_sp(bp);
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
 {
 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
-	int rc;
 
 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 	smp_mb__after_atomic();
@@ -6236,16 +6239,6 @@ static void bnxt_sp_task(struct work_struct *work)
 
 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
 		bnxt_cfg_ntp_filters(bp);
-	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
-				       &bp->sp_event))
-			bnxt_hwrm_phy_qcaps(bp);
-
-		rc = bnxt_update_link(bp, true);
-		if (rc)
-			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
-				   rc);
-	}
 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_exec_fwd_req(bp);
 	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6266,18 +6259,39 @@ static void bnxt_sp_task(struct work_struct *work)
 		bnxt_hwrm_tunnel_dst_port_free(
 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
 	}
+	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+		bnxt_hwrm_port_qstats(bp);
+
+	/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+	 * must be the last functions to be called before exiting.
+	 */
+	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+		int rc = 0;
+
+		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+				       &bp->sp_event))
+			bnxt_hwrm_phy_qcaps(bp);
+
+		bnxt_rtnl_lock_sp(bp);
+		if (test_bit(BNXT_STATE_OPEN, &bp->state))
+			rc = bnxt_update_link(bp, true);
+		bnxt_rtnl_unlock_sp(bp);
+		if (rc)
+			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+				   rc);
+	}
+	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+		bnxt_rtnl_lock_sp(bp);
+		if (test_bit(BNXT_STATE_OPEN, &bp->state))
+			bnxt_get_port_module_status(bp);
+		bnxt_rtnl_unlock_sp(bp);
+	}
 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, false);
 
 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, true);
 
-	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
-		bnxt_get_port_module_status(bp);
-
-	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
-		bnxt_hwrm_port_qstats(bp);
-
 	smp_mb__before_atomic();
 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }

@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 	}
 
 	/* try reuse page */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
 		return false;
 
 	/* change offset to the other half */

@@ -1601,8 +1601,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	netdev->netdev_ops = &ibmveth_netdev_ops;
 	netdev->ethtool_ops = &netdev_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
-	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
-		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	netdev->hw_features = NETIF_F_SG;
+	if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_RXCSUM;
+	}
 
 	netdev->features |= netdev->hw_features;
 

@@ -2517,7 +2517,7 @@ static int mtk_remove(struct platform_device *pdev)
 }
 
 const struct of_device_id of_mtk_match[] = {
-	{ .compatible = "mediatek,mt7623-eth" },
+	{ .compatible = "mediatek,mt2701-eth" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, of_mtk_match);

@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
-	memset(channel, 0, sizeof(*channel));
-
 	channel->max_rx = MAX_RX_RINGS;
 	channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
 
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	int xdp_count;
 	int err = 0;
 
-	if (channel->other_count || channel->combined_count ||
-	    channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
-	    channel->rx_count > MAX_RX_RINGS ||
-	    !channel->tx_count || !channel->rx_count)
+	if (!channel->tx_count || !channel->rx_count)
 		return -EINVAL;
 
 	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);

@@ -543,7 +543,6 @@ static int mlx5e_set_channels(struct net_device *dev,
 			      struct ethtool_channels *ch)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ncv = mlx5e_get_max_num_channels(priv->mdev);
 	unsigned int count = ch->combined_count;
 	bool arfs_enabled;
 	bool was_opened;
@@ -554,16 +553,6 @@ static int mlx5e_set_channels(struct net_device *dev,
 			    __func__);
 		return -EINVAL;
 	}
-	if (ch->rx_count || ch->tx_count) {
-		netdev_info(dev, "%s: separate rx/tx count not supported\n",
-			    __func__);
-		return -EINVAL;
-	}
-	if (count > ncv) {
-		netdev_info(dev, "%s: count (%d) > max (%d)\n",
-			    __func__, count, ncv);
-		return -EINVAL;
-	}
 
 	if (priv->params.num_channels == count)
 		return 0;

@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 		return false;
 	}
 
+	if (unlikely(page_is_pfmemalloc(dma_info->page)))
+		return false;
+
 	cache->page_cache[cache->tail] = *dma_info;
 	cache->tail = tail_next;
 	return true;

@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
 
 static int
 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
-				  struct mlxsw_sp_nexthop_group *nh_grp)
+				  struct mlxsw_sp_nexthop_group *nh_grp,
+				  bool reallocate)
 {
 	u32 adj_index = nh_grp->adj_index; /* base */
 	struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
 			continue;
 		}
 
-		if (nh->update) {
+		if (nh->update || reallocate) {
 			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
 							  adj_index, nh);
 			if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
 		/* Nothing was added or removed, so no need to reallocate. Just
 		 * update MAC on existing adjacency indexes.
 		 */
-		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+							false);
 		if (err) {
 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
 			goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
 	nh_grp->adj_index_valid = 1;
 	nh_grp->adj_index = adj_index;
 	nh_grp->ecmp_size = ecmp_size;
-	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
 	if (err) {
 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
 		goto set_trap;

@@ -297,7 +297,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		b_last_packet = list_empty(&p_tx->active_descq);
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-		if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
 			struct qed_ooo_buffer *p_buffer;
 
 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 			b_last_frag =
 				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
 			tx_frag = p_pkt->bds_set[0].tx_frag;
-			if (p_ll2_conn->gsi_enable)
+			if (p_ll2_conn->conn.gsi_enable)
 				qed_ll2b_release_tx_gsi_packet(p_hwfn,
 							       p_ll2_conn->
 							       my_id,
@@ -378,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 
 	spin_unlock_irqrestore(&p_tx->lock, flags);
 	tx_frag = p_pkt->bds_set[0].tx_frag;
-	if (p_ll2_conn->gsi_enable)
+	if (p_ll2_conn->conn.gsi_enable)
 		qed_ll2b_complete_tx_gsi_packet(p_hwfn,
 						p_ll2_conn->my_id,
 						p_pkt->cookie,
@@ -550,7 +550,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-		if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
 			struct qed_ooo_buffer *p_buffer;
 
 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -738,7 +738,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
 					       p_buffer->vlan, bd_flags,
 					       l4_hdr_offset_w,
-					       p_ll2_conn->tx_dest, 0,
+					       p_ll2_conn->conn.tx_dest, 0,
 					       first_frag,
 					       p_buffer->packet_length,
 					       p_buffer, true);
@@ -858,7 +858,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
 	u16 buf_idx;
 	int rc = 0;
 
-	if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
 		return rc;
 
 	if (!rx_num_ooo_buffers)
@@ -901,7 +901,7 @@ static void
 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
 				 struct qed_ll2_info *p_ll2_conn)
 {
-	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
 		return;
 
 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -913,7 +913,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
 {
 	struct qed_ooo_buffer *p_buffer;
 
-	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
 		return;
 
 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -945,23 +945,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 {
 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
 	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-	struct qed_ll2_info *ll2_info;
+	struct qed_ll2_conn ll2_info;
 	int rc;
 
-	ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
-	if (!ll2_info)
-		return -ENOMEM;
-	ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
-	ll2_info->mtu = params->mtu;
-	ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
-	ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
-	ll2_info->tx_tc = OOO_LB_TC;
-	ll2_info->tx_dest = CORE_TX_DEST_LB;
+	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+	ll2_info.mtu = params->mtu;
+	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+	ll2_info.tx_tc = OOO_LB_TC;
+	ll2_info.tx_dest = CORE_TX_DEST_LB;
 
-	rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
 					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
 					handle);
-	kfree(ll2_info);
 	if (rc) {
 		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
 		goto out;
@@ -1006,7 +1002,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 				     struct qed_ll2_info *p_ll2_conn,
 				     u8 action_on_error)
 {
-	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
 	struct core_rx_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -1032,7 +1028,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->sb_index = p_rx->rx_sb_index;
 	p_ramrod->complete_event_flg = 1;
 
-	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
 	DMA_REGPAIR_LE(p_ramrod->bd_base,
 		       p_rx->rxq_chain.p_phys_addr);
 	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1040,8 +1036,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
 		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
-	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
-	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
 	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
 									  : 1;
@@ -1056,14 +1052,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	}
 
 	p_ramrod->action_on_error.error_type = action_on_error;
-	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 				     struct qed_ll2_info *p_ll2_conn)
 {
-	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
 	struct core_tx_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -1075,7 +1071,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
 		return 0;
 
-	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
 		p_ll2_conn->tx_stats_en = 0;
 	else
 		p_ll2_conn->tx_stats_en = 1;
@@ -1096,7 +1092,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 
 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
 	p_ramrod->sb_index = p_tx->tx_sb_index;
-	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
 
@@ -1106,7 +1102,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 
 	memset(&pq_params, 0, sizeof(pq_params));
-	pq_params.core.tc = p_ll2_conn->tx_tc;
+	pq_params.core.tc = p_ll2_conn->conn.tx_tc;
 	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
@@ -1123,7 +1119,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
 	}
 
-	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
@@ -1224,7 +1220,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
 		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
-		   p_ll2_info->conn_type, rx_num_desc);
+		   p_ll2_info->conn.conn_type, rx_num_desc);
 
 out:
 	return rc;
@@ -1262,7 +1258,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
 		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
-		   p_ll2_info->conn_type, tx_num_desc);
+		   p_ll2_info->conn.conn_type, tx_num_desc);
 
 out:
 	if (rc)
@@ -1273,7 +1269,7 @@ out:
 }
 
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-			       struct qed_ll2_info *p_params,
+			       struct qed_ll2_conn *p_params,
 			       u16 rx_num_desc,
 			       u16 tx_num_desc,
 			       u8 *p_connection_handle)
@@ -1302,15 +1298,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
 	if (!p_ll2_info)
 		return -EBUSY;
 
-	p_ll2_info->conn_type = p_params->conn_type;
-	p_ll2_info->mtu = p_params->mtu;
-	p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
-	p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
-	p_ll2_info->tx_tc = p_params->tx_tc;
-	p_ll2_info->tx_dest = p_params->tx_dest;
-	p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
-	p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
-	p_ll2_info->gsi_enable = p_params->gsi_enable;
+	p_ll2_info->conn = *p_params;
 
 	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
 	if (rc)
@@ -1371,9 +1359,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
 
 	SET_FIELD(action_on_error,
 		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
-		  p_ll2_conn->ai_err_packet_too_big);
+		  p_ll2_conn->conn.ai_err_packet_too_big);
 	SET_FIELD(action_on_error,
-		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
 
 	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
 }
@@ -1600,7 +1588,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
 		   p_ll2->queue_id,
 		   p_ll2->cid,
-		   p_ll2->conn_type,
+		   p_ll2->conn.conn_type,
 		   prod_idx,
 		   first_frag_len,
 		   num_of_bds,
@@ -1676,7 +1664,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
 		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
 }
 
 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1817,7 +1805,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
 	}
 
-	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
 		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
 
 	return rc;
@@ -1993,7 +1981,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
 
 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 {
-	struct qed_ll2_info ll2_info;
+	struct qed_ll2_conn ll2_info;
 	struct qed_ll2_buffer *buffer, *tmp_buffer;
 	enum qed_ll2_conn_type conn_type;
 	struct qed_ptt *p_ptt;
@@ -2041,6 +2029,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 
 	/* Prepare the temporary ll2 information */
 	memset(&ll2_info, 0, sizeof(ll2_info));
+
 	ll2_info.conn_type = conn_type;
 	ll2_info.mtu = params->mtu;
 	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2120,7 +2109,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 	}
 
 	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
-
 	return 0;
 
 release_terminate_all:

@@ -112,15 +112,8 @@ struct qed_ll2_tx_queue {
 	bool b_completing_packet;
 };
 
-struct qed_ll2_info {
-	/* Lock protecting the state of LL2 */
-	struct mutex mutex;
+struct qed_ll2_conn {
 	enum qed_ll2_conn_type conn_type;
-	u32 cid;
-	u8 my_id;
-	u8 queue_id;
-	u8 tx_stats_id;
-	bool b_active;
 	u16 mtu;
 	u8 rx_drop_ttl0_flg;
 	u8 rx_vlan_removal_en;
@@ -128,10 +121,21 @@ struct qed_ll2_info {
 	enum core_tx_dest tx_dest;
 	enum core_error_handle ai_err_packet_too_big;
 	enum core_error_handle ai_err_no_buf;
+	u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+	/* Lock protecting the state of LL2 */
+	struct mutex mutex;
+	struct qed_ll2_conn conn;
+	u32 cid;
+	u8 my_id;
+	u8 queue_id;
+	u8 tx_stats_id;
+	bool b_active;
 	u8 tx_stats_en;
 	struct qed_ll2_rx_queue rx_queue;
 	struct qed_ll2_tx_queue tx_queue;
-	u8 gsi_enable;
 };
 
 /**
@@ -149,7 +153,7 @@ struct qed_ll2_info {
 * @return 0 on success, failure otherwise
 */
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-			       struct qed_ll2_info *p_params,
+			       struct qed_ll2_conn *p_params,
 			       u16 rx_num_desc,
 			       u16 tx_num_desc,
 			       u8 *p_connection_handle);

@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
 {
 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
 	struct qed_roce_ll2_info *roce_ll2;
-	struct qed_ll2_info ll2_params;
+	struct qed_ll2_conn ll2_params;
 	int rc;
 
 	if (!params) {

@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 		spin_lock_irqsave(&priv->lock, flags);
 		/* Clear TX interrupt */
 		ravb_write(ndev, ~mask, TIS);
-		ravb_tx_free(ndev, q);
+		ravb_tx_free(ndev, q, true);
 		netif_wake_subqueue(ndev, q);
 		mmiowb();
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1567,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:

@@ -351,6 +351,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(plat->phy_node);
+	of_node_put(plat->mdio_node);
 }
 #else
 struct plat_stmmacenet_data *

@@ -69,7 +69,6 @@ struct gtp_dev {
 	struct socket *sock0;
 	struct socket *sock1u;
 
-	struct net *net;
 	struct net_device *dev;
 
 	unsigned int hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
-	xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+	xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
 
 	switch (udp_sk(sk)->encap_type) {
 	case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
 				    pktinfo.iph->tos,
 				    ip4_dst_hoplimit(&pktinfo.rt->dst),
-				    htons(IP_DF),
+				    0,
 				    pktinfo.gtph_port, pktinfo.gtph_port,
 				    true, false);
 		break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
 static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net);
+			    int fd_gtp0, int fd_gtp1);
 
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
 		       struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 	fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
 	fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
 
-	err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+	err = gtp_encap_enable(dev, gtp, fd0, fd1);
 	if (err < 0)
 		goto out_err;
 
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
 }
 
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net)
+			    int fd_gtp0, int fd_gtp1)
 {
 	struct udp_tunnel_sock_cfg tuncfg = {NULL};
 	struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
 
 	gtp->sock0 = sock0;
 	gtp->sock1u = sock1u;
-	gtp->net = src_net;
 
 	tuncfg.sk_user_data = gtp;
 	tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
 MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");

@@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 			return -EINVAL;
 
 		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
-					    macvtap_is_little_endian(q)))
+					    macvtap_is_little_endian(q), true))
 			BUG();
 
 		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=

@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
 MODULE_LICENSE("GPL");
 
+static int bcm63xx_config_intr(struct phy_device *phydev)
+{
+	int reg, err;
+
+	reg = phy_read(phydev, MII_BCM63XX_IR);
+	if (reg < 0)
+		return reg;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		reg &= ~MII_BCM63XX_IR_GMASK;
+	else
+		reg |= MII_BCM63XX_IR_GMASK;
+
+	err = phy_write(phydev, MII_BCM63XX_IR, reg);
+	return err;
+}
+
 static int bcm63xx_config_init(struct phy_device *phydev)
 {
 	int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
-	.config_intr	= bcm_phy_config_intr,
+	.config_intr	= bcm63xx_config_intr,
 }, {
 	/* same phy as above, with just a different OUI */
 	.phy_id		= 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
-	.config_intr	= bcm_phy_config_intr,
+	.config_intr	= bcm63xx_config_intr,
 } };
 
 module_phy_driver(bcm63xx_driver);

@@ -17,6 +17,7 @@
 #include <linux/phy.h>
 
 #define TI_DP83848C_PHY_ID		0x20005ca0
+#define TI_DP83620_PHY_ID		0x20005ce0
 #define NS_DP83848C_PHY_ID		0x20005c90
 #define TLK10X_PHY_ID			0x2000a210
 #define TI_DP83822_PHY_ID		0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
 static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
 	{ TI_DP83848C_PHY_ID, 0xfffffff0 },
 	{ NS_DP83848C_PHY_ID, 0xfffffff0 },
+	{ TI_DP83620_PHY_ID, 0xfffffff0 },
 	{ TLK10X_PHY_ID, 0xfffffff0 },
 	{ TI_DP83822_PHY_ID, 0xfffffff0 },
 	{ }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 static struct phy_driver dp83848_driver[] = {
 	DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
 	DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+	DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
 	DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
 	DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
 };

@@ -1679,6 +1679,8 @@ static struct phy_driver marvell_drivers[] = {
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.did_interrupt = &m88e1121_did_interrupt,
+		.get_wol = &m88e1318_get_wol,
 		.set_wol = &m88e1318_set_wol,
 		.resume = &marvell_resume,
 		.suspend = &marvell_suspend,
 		.get_sset_count = marvell_get_sset_count,

@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
 	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
+}, {
+	.phy_id		= PHY_ID_KSZ8795,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
+	.name		= "Micrel KSZ8795",
+	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= ksz8873mll_config_aneg,
+	.read_status	= ksz8873mll_read_status,
+	.get_sset_count = kszphy_get_sset_count,
+	.get_strings	= kszphy_get_strings,
+	.get_stats	= kszphy_get_stats,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 } };
 
 module_phy_driver(ksphy_driver);

@@ -29,6 +29,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
  * phy_trigger_machine - trigger the state machine to run
  *
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
 * Description: There has been a change in state which requires that the
 * state machine runs.
 */
 
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
-	cancel_delayed_work_sync(&phydev->state_queue);
+	if (sync)
+		cancel_delayed_work_sync(&phydev->state_queue);
+	else
+		cancel_delayed_work(&phydev->state_queue);
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 }
 
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
 	phydev->state = PHY_HALTED;
 	mutex_unlock(&phydev->lock);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, false);
 }
 
 /**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
 	}
 
 	/* reschedule state queue work to run as soon as possible */
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 	return;
 
 ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
 	if (do_resume)
 		phy_resume(phydev);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
 

@@ -12,6 +12,7 @@
 */
 #include <linux/leds.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/netdevice.h>
 
 static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
 					    sizeof(struct phy_led_trigger) *
 					    phy->phy_num_led_triggers,
 					    GFP_KERNEL);
-	if (!phy->phy_led_triggers)
-		return -ENOMEM;
+	if (!phy->phy_led_triggers) {
+		err = -ENOMEM;
+		goto out_clear;
+	}
 
 	for (i = 0; i < phy->phy_num_led_triggers; i++) {
 		err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
 	while (i--)
 		phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
 	devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_clear:
+	phy->phy_num_led_triggers = 0;
 	return err;
 }
 EXPORT_SYMBOL_GPL(phy_led_triggers_register);

@@ -1360,7 +1360,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 			return -EINVAL;
 
 		if (virtio_net_hdr_from_skb(skb, &gso,
-					    tun_is_little_endian(tun))) {
+					    tun_is_little_endian(tun), true)) {
 			struct skb_shared_info *sinfo = skb_shinfo(skb);
 			pr_err("unexpected GSO type: "
 			       "0x%x, gso_size %d, hdr_len %d\n",

@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
 #define SAMSUNG_VENDOR_ID	0x04e8
 #define LENOVO_VENDOR_ID	0x17ef
 #define NVIDIA_VENDOR_ID	0x0955
+#define HP_VENDOR_ID		0x03f0
 
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
 	.driver_info = 0,
 },
 
+/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* AnyDATA ADU960S - handled by qmi_wwan */
 {
 	USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,

@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
 					      USB_CDC_PROTO_NONE),
 		.driver_info = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* HP lt2523 (Novatel E371) */
+		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
+					      USB_CLASS_COMM,
+					      USB_CDC_SUBCLASS_ETHERNET,
+					      USB_CDC_PROTO_NONE),
+		.driver_info = (unsigned long)&qmi_wwan_info,
+	},
 	{	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
 		.driver_info = (unsigned long)&qmi_wwan_info,

@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"6"
+#define NET_VERSION		"8"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		if (!list_empty(&tp->rx_done))
 			napi_schedule(napi);
+		else if (!skb_queue_empty(&tp->tx_queue) &&
+			 !list_empty(&tp->tx_free))
+			napi_schedule(napi);
 	}
 
 	return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
 		if (!netif_carrier_ok(netdev)) {
 			tp->rtl_ops.enable(tp);
 			set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+			netif_stop_queue(netdev);
 			napi_disable(&tp->napi);
 			netif_carrier_on(netdev);
 			rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			netif_wake_queue(netdev);
+			netif_info(tp, link, netdev, "carrier on\n");
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
 			napi_disable(&tp->napi);
 			tp->rtl_ops.disable(tp);
 			napi_enable(&tp->napi);
+			netif_info(tp, link, netdev, "carrier off\n");
 		}
 	}
 }
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
 	if (!netif_running(netdev))
 		return 0;
 
+	netif_stop_queue(netdev);
 	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
 	if (netif_carrier_ok(netdev)) {
-		netif_stop_queue(netdev);
 		mutex_lock(&tp->control);
 		tp->rtl_ops.disable(tp);
 		mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
 	if (netif_carrier_ok(netdev)) {
 		mutex_lock(&tp->control);
 		tp->rtl_ops.enable(tp);
+		rtl_start_rx(tp);
 		rtl8152_set_rx_mode(netdev);
 		mutex_unlock(&tp->control);
-		netif_wake_queue(netdev);
 	}
 
 	napi_enable(&tp->napi);
+	netif_wake_queue(netdev);
 	usb_submit_urb(tp->intr_urb, GFP_KERNEL);
 
+	if (!list_empty(&tp->rx_done))
+		napi_schedule(&tp->napi);
+
 	return 0;
 }
@@ -3572,6 +3584,8 @@ static bool delay_autosuspend(struct r8152 *tp)
 	 */
 	if (!sw_linking && tp->rtl_ops.in_nway(tp))
 		return true;
+	else if (!skb_queue_empty(&tp->tx_queue))
+		return true;
 	else
 		return false;
 }
@@ -3581,10 +3595,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 	struct net_device *netdev = tp->netdev;
 	int ret = 0;
 
+	set_bit(SELECTIVE_SUSPEND, &tp->flags);
+	smp_mb__after_atomic();
+
 	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
 		u32 rcr = 0;
 
 		if (delay_autosuspend(tp)) {
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
 			ret = -EBUSY;
 			goto out1;
 		}
@@ -3601,6 +3620,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 		if (!(ocp_data & RXFIFO_EMPTY)) {
 			rxdy_gated_en(tp, false);
 			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
 			ret = -EBUSY;
 			goto out1;
 		}
@@ -3620,8 +3641,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 		}
 	}
 
-	set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
 out1:
 	return ret;
 }
@@ -3677,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
 	if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
 		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
 			tp->rtl_ops.autosuspend_en(tp, false);
-			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 			napi_disable(&tp->napi);
 			set_bit(WORK_ENABLE, &tp->flags);
 			if (netif_carrier_ok(tp->netdev))
 				rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
+			if (!list_empty(&tp->rx_done))
+				napi_schedule(&tp->napi);
 		} else {
 			tp->rtl_ops.up(tp);
 			netif_carrier_off(tp->netdev);

@@ -48,8 +48,16 @@ module_param(gso, bool, 0444);
  */
 DECLARE_EWMA(pkt_len, 1, 64)

+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
 /* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)

 #define VIRTNET_DRIVER_VERSION "1.0.0"

@@ -1104,7 +1112,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	hdr = skb_vnet_hdr(skb);

 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-				    virtio_is_little_endian(vi->vdev)))
+				    virtio_is_little_endian(vi->vdev), false))
 		BUG();

 	if (vi->mergeable_rx_bufs)

@@ -1707,6 +1715,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 	u16 xdp_qp = 0, curr_qp;
 	int i, err;

+	if (prog && prog->xdp_adjust_head) {
+		netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||

@@ -1890,8 +1903,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 			put_page(vi->rq[i].alloc_frag.page);
 }

-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 {
+	/* For small receive mode always use kfree_skb variants */
+	if (!vi->mergeable_rx_bufs)
+		return false;
+
 	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
 		return false;
 	else if (q < vi->curr_queue_pairs)

@@ -1908,7 +1925,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		struct virtqueue *vq = vi->sq[i].vq;
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (!is_xdp_queue(vi, i))
+			if (!is_xdp_raw_buffer_queue(vi, i))
 				dev_kfree_skb(buf);
 			else
 				put_page(virt_to_head_page(buf));

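As a quick aside on the alignment math above: the minimum alignment must be at least the square root of the page size so that the low zero bits of an aligned address can encode every possible truesize. With 4 KiB pages that gives a shift of (12 + 1) / 2 = 6 (64-byte alignment); the old hard-coded 256 only stops being enough once pages grow beyond 64 KiB. A standalone sketch of the arithmetic (the PAGE_SHIFT values are assumptions for illustration):

    #include <stdio.h>

    /* Standalone check of the MERGEABLE_BUFFER_MIN_ALIGN_SHIFT arithmetic;
     * the PAGE_SHIFT values below are illustrative, not read from a system. */
    int main(void)
    {
            unsigned int page_shifts[] = { 12, 16, 18 }; /* 4K, 64K, 256K */
            unsigned int i;

            for (i = 0; i < 3; i++) {
                    unsigned int p = page_shifts[i];
                    unsigned int shift = (p + 1) / 2;   /* ceil(p / 2) */
                    unsigned long align = 1ul << shift;
                    /* distinct truesize steps that must fit in the low bits */
                    unsigned long values = (1ul << p) / align;

                    printf("PAGE_SHIFT=%u: align=%lu bytes, %lu encodable sizes\n",
                           p, align, values);
            }
            return 0;
    }

Because ceil(p/2) >= floor(p/2), the number of encodable size steps never exceeds the alignment, for any page size.
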
@@ -2268,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
 				= container_of(p, struct vxlan_fdb, hlist);
 			unsigned long timeout;

-			if (f->state & NUD_PERMANENT)
+			if (f->state & (NUD_PERMANENT | NUD_NOARP))
 				continue;

 			timeout = f->used + vxlan->cfg.age_interval * HZ;

@@ -2354,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
 }

 /* Purge the forwarding table */
-static void vxlan_flush(struct vxlan_dev *vxlan)
+static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
 {
 	unsigned int h;

@@ -2364,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
 		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
 			struct vxlan_fdb *f
 				= container_of(p, struct vxlan_fdb, hlist);
+			if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+				continue;
 			/* the all_zeros_mac entry is deleted at vxlan_uninit */
 			if (!is_zero_ether_addr(f->eth_addr))
 				vxlan_fdb_destroy(vxlan, f);

@@ -2385,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)

 	del_timer_sync(&vxlan->age_timer);

-	vxlan_flush(vxlan);
+	vxlan_flush(vxlan, false);
 	vxlan_sock_release(vxlan);

 	return ret;

@@ -2890,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	memcpy(&vxlan->cfg, conf, sizeof(*conf));
 	if (!vxlan->cfg.dst_port) {
 		if (conf->flags & VXLAN_F_GPE)
-			vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+			vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
 		else
 			vxlan->cfg.dst_port = default_port;
 	}

@@ -3058,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

+	vxlan_flush(vxlan, true);
+
 	spin_lock(&vn->sock_lock);
 	if (!hlist_unhashed(&vxlan->hlist))
 		hlist_del_rcu(&vxlan->hlist);

@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->num_queues;
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long tx_bytes = 0;
 	unsigned long tx_packets = 0;
 	unsigned int index;

+	spin_lock(&vif->lock);
+	if (vif->queues == NULL)
+		goto out;
+
 	/* Aggregate tx and rx stats from each queue */
-	for (index = 0; index < num_queues; ++index) {
+	for (index = 0; index < vif->num_queues; ++index) {
 		queue = &vif->queues[index];
 		rx_bytes += queue->stats.rx_bytes;
 		rx_packets += queue->stats.rx_packets;

@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 	}

+out:
+	spin_unlock(&vif->lock);

 	vif->dev->stats.rx_bytes = rx_bytes;
 	vif->dev->stats.rx_packets = rx_packets;
 	vif->dev->stats.tx_bytes = tx_bytes;

@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
 static void backend_disconnect(struct backend_info *be)
 {
 	if (be->vif) {
+		unsigned int queue_index;
+
 		xen_unregister_watchers(be->vif);
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
 		xenvif_disconnect_data(be->vif);
+		for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+			xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+		spin_lock(&be->vif->lock);
+		vfree(be->vif->queues);
+		be->vif->num_queues = 0;
+		be->vif->queues = NULL;
+		spin_unlock(&be->vif->lock);
+
 		xenvif_disconnect_ctrl(be->vif);
 	}
 }

@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
 err:
 	if (be->vif->num_queues > 0)
 		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+	for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+		xenvif_deinit_queue(&be->vif->queues[queue_index]);
 	vfree(be->vif->queues);
 	be->vif->queues = NULL;
 	be->vif->num_queues = 0;

@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 	queue->rx.req_prod_pvt = req_prod;

 	/* Not enough requests? Try again later. */
-	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+	if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 		return;
 	}

@@ -247,6 +247,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);

 extern int sysctl_unprivileged_bpf_disabled;

@@ -35,6 +35,8 @@
 #define PHY_ID_KSZ886X		0x00221430
 #define PHY_ID_KSZ8863		0x00221435

+#define PHY_ID_KSZ8795		0x00221550
+
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK	0x00000001
 #define MICREL_PHY_FXEN		0x00000002

@@ -25,7 +25,6 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mod_devicetable.h>
-#include <linux/phy_led_triggers.h>

 #include <linux/atomic.h>

@@ -18,11 +18,11 @@ struct phy_device;
 #ifdef CONFIG_LED_TRIGGER_PHY

 #include <linux/leds.h>
+#include <linux/phy.h>

 #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE	10
-#define PHY_MII_BUS_ID_SIZE	(20 - 3)

-#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
 				       FIELD_SIZEOF(struct mdio_device, addr)+\
 				       PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)

@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,

 static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
 					  struct virtio_net_hdr *hdr,
-					  bool little_endian)
+					  bool little_endian,
+					  bool has_data_valid)
 {
 	memset(hdr, 0, sizeof(*hdr));	/* no info leak */

@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
 						     skb_checksum_start_offset(skb));
 		hdr->csum_offset = __cpu_to_virtio16(little_endian,
 						     skb->csum_offset);
-	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+	} else if (has_data_valid &&
+		   skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
 	} /* else everything is zero */

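The extra has_data_valid flag exists because VIRTIO_NET_HDR_F_DATA_VALID is only meaningful toward a receiver; a driver building a header for transmit toward a device must not set it. A minimal sketch of the two call patterns against the signature above (the wrapper functions themselves are hypothetical):

    /* Hypothetical wrappers; only the virtio_net_hdr_from_skb() calls and
     * their little_endian/has_data_valid arguments reflect the API above. */
    static int tx_fill_hdr(struct sk_buff *skb, struct virtio_net_hdr *hdr,
                           bool little_endian)
    {
            /* transmit path: never claim DATA_VALID toward the device */
            return virtio_net_hdr_from_skb(skb, hdr, little_endian, false);
    }

    static int rx_fill_hdr(struct sk_buff *skb, struct virtio_net_hdr *hdr,
                           bool little_endian)
    {
            /* receive path (e.g. handing the skb to a tap reader):
             * DATA_VALID may be propagated */
            return virtio_net_hdr_from_skb(skb, hdr, little_endian, true);
    }
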
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
  *	upper-layer output functions
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt, int tclass);
+	     __u32 mark, struct ipv6_txoptions *opt, int tclass);

 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);

@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
 	int (*get_encap_size)(struct lwtunnel_state *lwtstate);
 	int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
 	int (*xmit)(struct sk_buff *skb);
+
+	struct module *owner;
 };

 #ifdef CONFIG_LWTUNNEL

@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
 			   unsigned int num);
 int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
 			   unsigned int num);
+int lwtunnel_valid_encap_type(u16 encap_type);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
 int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
 			 struct nlattr *encap,
 			 unsigned int family, const void *cfg,

@@ -168,6 +172,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
 	return -EOPNOTSUPP;
 }

+static inline int lwtunnel_valid_encap_type(u16 encap_type)
+{
+	return -EOPNOTSUPP;
+}
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
 				       struct nlattr *encap,
 				       unsigned int family, const void *cfg,

@@ -207,9 +207,9 @@ struct nft_set_iter {
 	unsigned int	skip;
 	int		err;
 	int		(*fn)(const struct nft_ctx *ctx,
-			      const struct nft_set *set,
+			      struct nft_set *set,
 			      const struct nft_set_iter *iter,
-			      const struct nft_set_elem *elem);
+			      struct nft_set_elem *elem);
 };

 /**

@@ -301,7 +301,7 @@ struct nft_set_ops {
 	void				(*remove)(const struct nft_set *set,
 						  const struct nft_set_elem *elem);
 	void				(*walk)(const struct nft_ctx *ctx,
-						const struct nft_set *set,
+						struct nft_set *set,
 						struct nft_set_iter *iter);

 	unsigned int			(*privsize)(const struct nlattr * const nla[]);

@@ -9,6 +9,12 @@ struct nft_fib {

 extern const struct nla_policy nft_fib_policy[];

+static inline bool
+nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+{
+	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+}
+
 int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
 int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		 const struct nlattr * const tb[]);

@@ -9,4 +9,6 @@
 #define NF_LOG_MACDECODE	0x20	/* Decode MAC header */
 #define NF_LOG_MASK		0x2f

+#define NF_LOG_PREFIXLEN	128
+
 #endif /* _NETFILTER_NF_LOG_H */

@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
 /**
  * enum nft_rule_compat_attributes - nf_tables rule compat attributes
  *
- * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
  * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
  */
 enum nft_rule_compat_attributes {

@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
  * enum nft_byteorder_ops - nf_tables byteorder operators
  *
  * @NFT_BYTEORDER_NTOH: network to host operator
- * @NFT_BYTEORDER_HTON: host to network opertaor
+ * @NFT_BYTEORDER_HTON: host to network operator
  */
 enum nft_byteorder_ops {
 	NFT_BYTEORDER_NTOH,

@@ -11,7 +11,6 @@
  */
 #include <linux/bpf.h>
 #include <linux/err.h>
-#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/filter.h>

@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	if (array_size >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);

-
 	/* allocate all map elements and zero-initialize them */
-	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
-	if (!array) {
-		array = vzalloc(array_size);
-		if (!array)
-			return ERR_PTR(-ENOMEM);
-	}
+	array = bpf_map_area_alloc(array_size);
+	if (!array)
+		return ERR_PTR(-ENOMEM);

 	/* copy mandatory map attributes */
 	array->map.map_type = attr->map_type;

@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)

 	if (array_size >= U32_MAX - PAGE_SIZE ||
 	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
-		kvfree(array);
+		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
 out:

@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 		bpf_array_free_percpu(array);

-	kvfree(array);
+	bpf_map_area_free(array);
 }

 static const struct bpf_map_ops array_ops = {

@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
 	/* make sure it's empty */
 	for (i = 0; i < array->map.max_entries; i++)
 		BUG_ON(array->ptrs[i] != NULL);
-	kvfree(array);
+
+	bpf_map_area_free(array);
 }

 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)

@@ -13,7 +13,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"

@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
 		free_percpu(pptr);
 	}
 free_elems:
-	vfree(htab->elems);
+	bpf_map_area_free(htab->elems);
 }

 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,

@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
 {
 	int err = -ENOMEM, i;

-	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+	htab->elems = bpf_map_area_alloc(htab->elem_size *
+					 htab->map.max_entries);
 	if (!htab->elems)
 		return -ENOMEM;

@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		goto free_htab;

 	err = -ENOMEM;
-	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
-				      GFP_USER | __GFP_NOWARN);
-
-	if (!htab->buckets) {
-		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
-		if (!htab->buckets)
-			goto free_htab;
-	}
+	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+					   sizeof(struct bucket));
+	if (!htab->buckets)
+		goto free_htab;

 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_HEAD(&htab->buckets[i].head);

@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_extra_elems:
 	free_percpu(htab->extra_elems);
 free_buckets:
-	kvfree(htab->buckets);
+	bpf_map_area_free(htab->buckets);
 free_htab:
 	kfree(htab);
 	return ERR_PTR(err);

@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
 		prealloc_destroy(htab);

 	free_percpu(htab->extra_elems);
-	kvfree(htab->buckets);
+	bpf_map_area_free(htab->buckets);
 	kfree(htab);
 }

@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include <linux/stacktrace.h>
 #include <linux/perf_event.h>
 #include "percpu_freelist.h"

@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
 	int err;

-	smap->elems = vzalloc(elem_size * smap->map.max_entries);
+	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
 	if (!smap->elems)
 		return -ENOMEM;

@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	return 0;

 free_elems:
-	vfree(smap->elems);
+	bpf_map_area_free(smap->elems);
 	return err;
 }

@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-E2BIG);

-	smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
-	if (!smap) {
-		smap = vzalloc(cost);
-		if (!smap)
-			return ERR_PTR(-ENOMEM);
-	}
+	smap = bpf_map_area_alloc(cost);
+	if (!smap)
+		return ERR_PTR(-ENOMEM);

 	err = -E2BIG;
 	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));

@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 put_buffers:
 	put_callchain_buffers();
 free_smap:
-	kvfree(smap);
+	bpf_map_area_free(smap);
 	return ERR_PTR(err);
 }

@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
 	/* wait for bpf programs to complete before freeing stack map */
 	synchronize_rcu();

-	vfree(smap->elems);
+	bpf_map_area_free(smap->elems);
 	pcpu_freelist_destroy(&smap->freelist);
-	kvfree(smap);
+	bpf_map_area_free(smap);
 	put_callchain_buffers();
 }

@@ -12,6 +12,8 @@
 #include <linux/bpf.h>
 #include <linux/syscalls.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <linux/license.h>

@@ -49,6 +51,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
 	list_add(&tl->list_node, &bpf_map_types);
 }

+void *bpf_map_area_alloc(size_t size)
+{
+	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
+	 * trigger under memory pressure as we really just want to
+	 * fail instead.
+	 */
+	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+	void *area;
+
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+		area = kmalloc(size, GFP_USER | flags);
+		if (area != NULL)
+			return area;
+	}
+
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+			 PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+	kvfree(area);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();

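The helper tries kmalloc() first for modestly sized requests and falls back to __vmalloc(), with __GFP_NORETRY keeping the OOM killer out of the picture; bpf_map_area_free() can then use kvfree() regardless of which path produced the pointer. A sketch of the call-site pattern the arraymap/hashtab/stackmap conversions above follow (the example_map structure is hypothetical):

    /* Hypothetical map; the bpf_map_area_alloc()/bpf_map_area_free() pairing
     * mirrors the converted call sites above. */
    struct example_map {
            struct bpf_map map;
            void *elems;
    };

    static struct example_map *example_map_alloc(size_t elems_size)
    {
            struct example_map *m;

            m = bpf_map_area_alloc(sizeof(*m));       /* small: kmalloc path */
            if (!m)
                    return NULL;

            m->elems = bpf_map_area_alloc(elems_size); /* large: vmalloc fallback */
            if (!m->elems) {
                    bpf_map_area_free(m);
                    return NULL;
            }
            return m;
    }

    static void example_map_free(struct example_map *m)
    {
            bpf_map_area_free(m->elems);  /* kvfree() handles either backing */
            bpf_map_area_free(m);
    }
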
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if) {
 		ret = -EINVAL;
-		goto put_primary_if;
+		goto free_skb;
 	}

 	/* Create one header to be copied to all fragments */

@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
 		if (!skb_fragment) {
 			ret = -ENOMEM;
-			goto free_skb;
+			goto put_primary_if;
 		}

 		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);

@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
 		if (ret != NET_XMIT_SUCCESS) {
 			ret = NET_XMIT_DROP;
-			goto free_skb;
+			goto put_primary_if;
 		}

 		frag_header.no++;

@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 		/* The initial check in this function should cover this case */
 		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
 			ret = -EINVAL;
-			goto free_skb;
+			goto put_primary_if;
 		}
 	}

@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	if (batadv_skb_head_push(skb, header_size) < 0 ||
 	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
 		ret = -ENOMEM;
-		goto free_skb;
+		goto put_primary_if;
 	}

 	memcpy(skb->data, &frag_header, header_size);

@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }

-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
-			  struct nlattr *tb[], struct nlattr *data[])
-{
-	struct net_bridge *br = netdev_priv(dev);
-
-	if (tb[IFLA_ADDRESS]) {
-		spin_lock_bh(&br->lock);
-		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
-		spin_unlock_bh(&br->lock);
-	}
-
-	return register_netdevice(dev);
-}
-
 static int br_port_slave_changelink(struct net_device *brdev,
 				    struct net_device *dev,
 				    struct nlattr *tb[],

@@ -1115,6 +1101,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	return 0;
 }

+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+			  struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net_bridge *br = netdev_priv(dev);
+	int err;
+
+	if (tb[IFLA_ADDRESS]) {
+		spin_lock_bh(&br->lock);
+		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+		spin_unlock_bh(&br->lock);
+	}
+
+	err = br_changelink(dev, tb, data);
+	if (err)
+		return err;
+
+	return register_netdevice(dev);
+}
+
 static size_t br_get_size(const struct net_device *brdev)
 {
 	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */

@@ -2795,9 +2795,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, type)) {
 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-	} else if (illegal_highdma(skb->dev, skb)) {
-		features &= ~NETIF_F_SG;
 	}
+	if (illegal_highdma(skb->dev, skb))
+		features &= ~NETIF_F_SG;

 	return features;
 }

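The regression fixed here is purely control flow: in the old else-if form, a packet that failed the checksum test never reached the highdma check, so NETIF_F_SG could survive for skbs the device cannot DMA. A standalone toy model of the two orderings (the flag bit values are made up for illustration):

    #include <stdio.h>

    /* Toy model of harmonize_features(); flag values are made up. */
    #define F_SG   0x1u
    #define F_CSUM 0x2u

    static unsigned int old_logic(unsigned int f, int csum_bad, int highdma_bad)
    {
            if (csum_bad)
                    f &= ~F_CSUM;
            else if (highdma_bad)   /* skipped when csum_bad: the bug */
                    f &= ~F_SG;
            return f;
    }

    static unsigned int new_logic(unsigned int f, int csum_bad, int highdma_bad)
    {
            if (csum_bad)
                    f &= ~F_CSUM;
            if (highdma_bad)        /* now checked unconditionally */
                    f &= ~F_SG;
            return f;
    }

    int main(void)
    {
            unsigned int f = F_SG | F_CSUM;
            /* old keeps F_SG set even though the pages are not DMA-able */
            printf("old: %#x, new: %#x\n",
                   old_logic(f, 1, 1), new_logic(f, 1, 1));
            return 0;
    }
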
@@ -1712,7 +1712,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
 						   void __user *useraddr)
 {
-	struct ethtool_channels channels, max;
+	struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
 	u32 max_rx_in_use = 0;

 	if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)

@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
 	.fill_encap	= bpf_fill_encap_info,
 	.get_encap_size = bpf_encap_nlsize,
 	.cmp_encap	= bpf_encap_cmp,
+	.owner		= THIS_MODULE,
 };

 static int __init bpf_lwt_init(void)

@@ -26,6 +26,7 @@
 #include <net/lwtunnel.h>
 #include <net/rtnetlink.h>
 #include <net/ip6_fib.h>
+#include <net/nexthop.h>

 #ifdef CONFIG_MODULES

@@ -114,26 +115,78 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
 	ret = -EOPNOTSUPP;
 	rcu_read_lock();
 	ops = rcu_dereference(lwtun_encaps[encap_type]);
+#ifdef CONFIG_MODULES
+	if (!ops) {
+		const char *encap_type_str = lwtunnel_encap_str(encap_type);
+
+		if (encap_type_str) {
+			rcu_read_unlock();
+			request_module("rtnl-lwt-%s", encap_type_str);
+			rcu_read_lock();
+			ops = rcu_dereference(lwtun_encaps[encap_type]);
+		}
+	}
+#endif
-	if (likely(ops && ops->build_state))
+	if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
 		ret = ops->build_state(dev, encap, family, cfg, lws);
+		if (ret)
+			module_put(ops->owner);
+	}
 	rcu_read_unlock();

 	return ret;
 }
 EXPORT_SYMBOL(lwtunnel_build_state);

+int lwtunnel_valid_encap_type(u16 encap_type)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = -EINVAL;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE ||
+	    encap_type > LWTUNNEL_ENCAP_MAX)
+		return ret;
+
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[encap_type]);
+	rcu_read_unlock();
+#ifdef CONFIG_MODULES
+	if (!ops) {
+		const char *encap_type_str = lwtunnel_encap_str(encap_type);
+
+		if (encap_type_str) {
+			__rtnl_unlock();
+			request_module("rtnl-lwt-%s", encap_type_str);
+			rtnl_lock();
+
+			rcu_read_lock();
+			ops = rcu_dereference(lwtun_encaps[encap_type]);
+			rcu_read_unlock();
+		}
+	}
+#endif
+	return ops ? 0 : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type);
+
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+{
+	struct rtnexthop *rtnh = (struct rtnexthop *)attr;
+	struct nlattr *nla_entype;
+	struct nlattr *attrs;
+	struct nlattr *nla;
+	u16 encap_type;
+	int attrlen;
+
+	while (rtnh_ok(rtnh, remaining)) {
+		attrlen = rtnh_attrlen(rtnh);
+		if (attrlen > 0) {
+			attrs = rtnh_attrs(rtnh);
+			nla = nla_find(attrs, attrlen, RTA_ENCAP);
+			nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+			if (nla_entype) {
+				encap_type = nla_get_u16(nla_entype);
+
+				if (lwtunnel_valid_encap_type(encap_type) != 0)
+					return -EOPNOTSUPP;
+			}
+		}
+		rtnh = rtnh_next(rtnh, &remaining);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
+
 void lwtstate_free(struct lwtunnel_state *lws)
 {
 	const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];

@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
 	} else {
 		kfree(lws);
 	}
+	module_put(ops->owner);
 }
 EXPORT_SYMBOL(lwtstate_free);

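For the request_module("rtnl-lwt-%s", ...) calls above to find anything, each encap module has to advertise a matching alias, and the new owner field is what try_module_get()/module_put() pin while encap states exist. A sketch of how an encap module would wire both up, assuming the MODULE_ALIAS_RTNL_LWT() helper added alongside these fixes (the example_* callbacks and the reuse of the MPLS encap type are placeholders):

    static const struct lwtunnel_encap_ops example_encap_ops = {
            .build_state    = example_build_state,    /* placeholder callbacks */
            .fill_encap     = example_fill_encap_info,
            .get_encap_size = example_encap_nlsize,
            .cmp_encap      = example_encap_cmp,
            .owner          = THIS_MODULE,  /* pins module while states exist */
    };

    static int __init example_lwt_init(void)
    {
            return lwtunnel_encap_add_ops(&example_encap_ops,
                                          LWTUNNEL_ENCAP_MPLS /* placeholder */);
    }
    module_init(example_lwt_init);

    /* lets request_module("rtnl-lwt-MPLS") resolve to this module */
    MODULE_ALIAS_RTNL_LWT(MPLS);
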
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}

@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
-		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 		return;

@@ -1105,10 +1105,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
 	/* Use already configured phy mode */
 	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
 		p->phy_interface = p->phy->interface;
-	phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
-			   p->phy_interface);
-
-	return 0;
+	return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+				  p->phy_interface);
 }

 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,

@@ -1203,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);

+	netif_device_detach(slave_dev);
+
 	if (p->phy) {
 		phy_stop(p->phy);
 		p->old_pause = -1;

@@ -46,6 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
 #include <net/l3mdev.h>
+#include <net/lwtunnel.h>
 #include <trace/events/fib.h>

 #ifndef CONFIG_IP_MULTIPLE_TABLES

@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
 			cfg->fc_mx_len = nla_len(attr);
 			break;
 		case RTA_MULTIPATH:
+			err = lwtunnel_valid_encap_type_attr(nla_data(attr),
+							     nla_len(attr));
+			if (err < 0)
+				goto errout;
 			cfg->fc_mp = nla_data(attr);
 			cfg->fc_mp_len = nla_len(attr);
 			break;

@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
 			break;
 		case RTA_ENCAP_TYPE:
 			cfg->fc_encap_type = nla_get_u16(attr);
+			err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+			if (err < 0)
+				goto errout;
 			break;
 		}
 	}

@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 	sk->sk_protocol = ip_hdr(skb)->protocol;
 	sk->sk_bound_dev_if = arg->bound_dev_if;
 	sk->sk_sndbuf = sysctl_wmem_default;
+	sk->sk_mark = fl4.flowi4_mark;
 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
 	if (unlikely(err)) {

@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
 	.fill_encap = ip_tun_fill_encap_info,
 	.get_encap_size = ip_tun_encap_nlsize,
 	.cmp_encap = ip_tun_cmp_encap,
+	.owner = THIS_MODULE,
 };

 static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {

@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
 	.fill_encap = ip6_tun_fill_encap_info,
 	.get_encap_size = ip6_tun_encap_nlsize,
 	.cmp_encap = ip_tun_cmp_encap,
+	.owner = THIS_MODULE,
 };

 void __init ip_tunnel_core_init(void)

@@ -144,7 +144,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 	rcu_read_lock_bh();
 	c = __clusterip_config_find(net, clusterip);
 	if (c) {
-		if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
+#ifdef CONFIG_PROC_FS
+		if (!c->pde)
+			c = NULL;
+		else
+#endif
+		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
 			c = NULL;
 		else if (entry)
 			atomic_inc(&c->entries);

@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
 	return dev_match || flags & XT_RPFILTER_LOOSE;
 }

-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
 {
-	const struct rtable *rt = skb_rtable(skb);
-	return rt && (rt->rt_flags & RTCF_LOCAL);
+	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
 }

 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)

@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	info = par->matchinfo;
 	invert = info->flags & XT_RPFILTER_INVERT;

-	if (rpfilter_is_local(skb))
+	if (rpfilter_is_loopback(skb, xt_in(par)))
 		return true ^ invert;

 	iph = ip_hdr(skb);

@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 	/* ip_route_me_harder expects skb->dst to be set */
 	skb_dst_set_noref(nskb, skb_dst(oldskb));

+	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
 	skb_reserve(nskb, LL_MAX_HEADER);
 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
 				   ip4_dst_hoplimit(skb_dst(nskb)));

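IP4_REPLY_MARK() (and the IP6_REPLY_MARK() used in the IPv6 counterpart below) only reflects the mark when the fwmark_reflect sysctl is enabled, so resets keep their old unmarked behaviour by default. To the best of my reading the helpers boil down to something like the following sketch, not quoted verbatim from the headers:

    /* Sketch of the reply-mark helpers; see include/net/ip.h and
     * include/net/ipv6.h for the authoritative definitions. */
    #define IP4_REPLY_MARK(net, mark) \
            ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
    #define IP6_REPLY_MARK(net, mark) \
            ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
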
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
 	return addr;
 }

-static bool fib4_is_local(const struct sk_buff *skb)
-{
-	const struct rtable *rt = skb_rtable(skb);
-
-	return rt && (rt->rt_flags & RTCF_LOCAL);
-}
-
 #define DSCP_BITS     0xfc

 void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,

@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	else
 		oif = NULL;

-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) {
-		nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+		nft_fib_store_result(dest, priv->result, pkt,
+				     nft_in(pkt)->ifindex);
 		return;
 	}

@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	switch (res.type) {
 	case RTN_UNICAST:
 		break;
-	case RTN_LOCAL:	/* should not appear here, see fib4_is_local() above */
+	case RTN_LOCAL:	/* Should not see RTN_LOCAL here */
 		return;
 	default:
 		break;

@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	 * scaled. So correct it appropriately.
 	 */
 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+	tp->max_window = tp->snd_wnd;

 	/* Activate the retrans timer so that SYNACK can be retransmitted.
 	 * The request socket is not added to the ehash

@@ -5078,7 +5078,7 @@ static void tcp_check_space(struct sock *sk)
 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
 		/* pairs with tcp_poll() */
-		smp_mb__after_atomic();
+		smp_mb();
 		if (sk->sk_socket &&
 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 			tcp_new_space(sk);

@@ -5540,8 +5540,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 	struct net_device *dev;
 	struct inet6_dev *idev;

-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
+	for_each_netdev(net, dev) {
 		idev = __in6_dev_get(dev);
 		if (idev) {
 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);

@@ -5550,7 +5549,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 			dev_disable_change(idev);
 		}
 	}
-	rcu_read_unlock();
 }

 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)

@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
 	.fill_encap = ila_fill_encap_info,
 	.get_encap_size = ila_encap_nlsize,
 	.cmp_encap = ila_encap_cmp,
+	.owner = THIS_MODULE,
 };

 int ila_lwt_init(void)

@@ -176,7 +176,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
 	/* Restore final destination back after routing done */
 	fl6.daddr = sk->sk_v6_daddr;

-	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+	res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
 		       np->tclass);
 	rcu_read_unlock();
 	return res;

@@ -582,6 +582,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 		return -1;

 	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+	ipv6h = ipv6_hdr(skb);
+
 	if (offset > 0) {
 		struct ipv6_tlv_tnl_enc_lim *tel;
 		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];

@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  * which are using proper atomic operations or spinlocks.
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt, int tclass)
+	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
 {
 	struct net *net = sock_net(sk);
 	const struct ipv6_pinfo *np = inet6_sk(sk);

@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,

 	skb->protocol = htons(ETH_P_IPV6);
 	skb->priority = sk->sk_priority;
-	skb->mark = sk->sk_mark;
+	skb->mark = mark;

 	mtu = dst_mtu(dst);
 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {

@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)

 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
-	__u8 nexthdr = ipv6h->nexthdr;
-	__u16 off = sizeof(*ipv6h);
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+	unsigned int nhoff = raw - skb->data;
+	unsigned int off = nhoff + sizeof(*ipv6h);
+	u8 next, nexthdr = ipv6h->nexthdr;

 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
-		__u16 optlen = 0;
 		struct ipv6_opt_hdr *hdr;
-		if (raw + off + sizeof(*hdr) > skb->data &&
-		    !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+		u16 optlen;
+
+		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
 			break;

-		hdr = (struct ipv6_opt_hdr *) (raw + off);
+		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
 		if (nexthdr == NEXTHDR_FRAGMENT) {
 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
 			if (frag_hdr->frag_off)

@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 		} else {
 			optlen = ipv6_optlen(hdr);
 		}
+		/* cache hdr->nexthdr, since pskb_may_pull() might
+		 * invalidate hdr
+		 */
+		next = hdr->nexthdr;
 		if (nexthdr == NEXTHDR_DEST) {
-			__u16 i = off + 2;
+			u16 i = 2;
+
+			/* Remember : hdr is no longer valid at this point. */
+			if (!pskb_may_pull(skb, off + optlen))
+				break;
+
 			while (1) {
 				struct ipv6_tlv_tnl_enc_lim *tel;

 				/* No more room for encapsulation limit */
-				if (i + sizeof (*tel) > off + optlen)
+				if (i + sizeof(*tel) > optlen)
 					break;

-				tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+				tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;
 				/* return index of option if found and valid */
 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
 				    tel->length == 1)
-					return i;
+					return i + off - nhoff;
 				/* else jump to next option */
 				if (tel->type)
 					i += tel->length + 2;

@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 				i++;
 			}
 		}
-		nexthdr = hdr->nexthdr;
+		nexthdr = next;
 		off += optlen;
 	}
 	return 0;

@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowlabel = key->label;
 	} else {
 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+		ipv6h = ipv6_hdr(skb);
 		if (offset > 0) {
 			struct ipv6_tlv_tnl_enc_lim *tel;

@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
 	return ret;
 }

-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
 {
-	const struct rt6_info *rt = (const void *) skb_dst(skb);
-	return rt && (rt->rt6i_flags & RTF_LOCAL);
+	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
 }

 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)

@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	struct ipv6hdr *iph;
 	bool invert = info->flags & XT_RPFILTER_INVERT;

-	if (rpfilter_is_local(skb))
+	if (rpfilter_is_loopback(skb, xt_in(par)))
 		return true ^ invert;

 	iph = ipv6_hdr(skb);

@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 	fl6.fl6_sport = otcph->dest;
 	fl6.fl6_dport = otcph->source;
 	fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
+	fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
 	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {

@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)

 	skb_dst_set(nskb, dst);

+	nskb->mark = fl6.flowi6_mark;
+
 	skb_reserve(nskb, hh_len + dst->header_len);
 	ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
 				    ip6_dst_hoplimit(dst));

@@ -18,13 +18,6 @@
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>

-static bool fib6_is_local(const struct sk_buff *skb)
-{
-	const struct rt6_info *rt = (const void *)skb_dst(skb);
-
-	return rt && (rt->rt6i_flags & RTF_LOCAL);
-}
-
 static int get_ifindex(const struct net_device *dev)
 {
 	return dev ? dev->ifindex : 0;

@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,

 	lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);

-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) {
-		nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+		nft_fib_store_result(dest, priv->result, pkt,
+				     nft_in(pkt)->ifindex);
 		return;
 	}

@@ -2896,6 +2896,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (tb[RTA_MULTIPATH]) {
 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
+
+		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
+						     cfg->fc_mp_len);
+		if (err < 0)
+			goto errout;
 	}

 	if (tb[RTA_PREF]) {

@@ -2909,9 +2914,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (tb[RTA_ENCAP])
 		cfg->fc_encap = tb[RTA_ENCAP];

-	if (tb[RTA_ENCAP_TYPE])
+	if (tb[RTA_ENCAP_TYPE]) {
 		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

+		err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+		if (err < 0)
+			goto errout;
+	}
+
 	if (tb[RTA_EXPIRES]) {
 		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)

 	val = nla_data(info->attrs[SEG6_ATTR_DST]);
 	t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
+	if (!t_new)
+		return -ENOMEM;

 	mutex_lock(&sdata->lock);

@@ -422,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
 	.fill_encap = seg6_fill_encap_info,
 	.get_encap_size = seg6_encap_nlsize,
 	.cmp_encap = seg6_encap_cmp,
+	.owner = THIS_MODULE,
 };

 int __init seg6_iptunnel_init(void)

@@ -469,7 +469,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}

@@ -840,7 +840,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
-		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 		if (rst)
 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);

@@ -40,8 +40,6 @@ void rate_control_rate_init(struct sta_info *sta)

 	ieee80211_sta_set_rx_nss(sta);

-	ieee80211_recalc_min_chandef(sta->sdata);
-
 	if (!ref)
 		return;

@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 }
 EXPORT_SYMBOL_GPL(mpls_pkt_too_big);

-static u32 mpls_multipath_hash(struct mpls_route *rt,
-			       struct sk_buff *skb, bool bos)
+static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
 {
 	struct mpls_entry_decoded dec;
+	unsigned int mpls_hdr_len = 0;
 	struct mpls_shim_hdr *hdr;
 	bool eli_seen = false;
 	int label_index;
 	u32 hash = 0;

-	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
 	     label_index++) {
-		if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+		mpls_hdr_len += sizeof(*hdr);
+		if (!pskb_may_pull(skb, mpls_hdr_len))
 			break;

 		/* Read and decode the current label */

@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
 			eli_seen = true;
 		}

-		bos = dec.bos;
-		if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
-					 sizeof(struct iphdr))) {
+		if (!dec.bos)
+			continue;
+
+		/* found bottom label; does skb have room for a header? */
+		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
 			const struct iphdr *v4hdr;

-			v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
-						       label_index);
+			v4hdr = (const struct iphdr *)(hdr + 1);
 			if (v4hdr->version == 4) {
 				hash = jhash_3words(ntohl(v4hdr->saddr),
 						    ntohl(v4hdr->daddr),
 						    v4hdr->protocol, hash);
 			} else if (v4hdr->version == 6 &&
-				   pskb_may_pull(skb, sizeof(*hdr) * label_index +
-						 sizeof(struct ipv6hdr))) {
+				   pskb_may_pull(skb, mpls_hdr_len +
+						 sizeof(struct ipv6hdr))) {
 				const struct ipv6hdr *v6hdr;

-				v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
-								label_index);
-
+				v6hdr = (const struct ipv6hdr *)(hdr + 1);
 				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
 				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
 				hash = jhash_1word(v6hdr->nexthdr, hash);
 			}
 		}
+
+		break;
 	}

 	return hash;
 }

 static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
-					     struct sk_buff *skb, bool bos)
+					     struct sk_buff *skb)
 {
 	int alive = ACCESS_ONCE(rt->rt_nhn_alive);
 	u32 hash = 0;

@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
 	if (alive <= 0)
 		return NULL;

-	hash = mpls_multipath_hash(rt, skb, bos);
+	hash = mpls_multipath_hash(rt, skb);
 	nh_index = hash % alive;
 	if (alive == rt->rt_nhn)
 		goto out;

@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
 	hdr = mpls_hdr(skb);
 	dec = mpls_entry_decode(hdr);

-	/* Pop the label */
-	skb_pull(skb, sizeof(*hdr));
-	skb_reset_network_header(skb);
-
-	skb_orphan(skb);
-
 	rt = mpls_route_input_rcu(net, dec.label);
 	if (!rt)
 		goto drop;

-	nh = mpls_select_multipath(rt, skb, dec.bos);
+	nh = mpls_select_multipath(rt, skb);
 	if (!nh)
 		goto drop;

@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
 	if (!mpls_output_possible(out_dev))
 		goto drop;

+	/* Pop the label */
+	skb_pull(skb, sizeof(*hdr));
+	skb_reset_network_header(skb);
+
+	skb_orphan(skb);
+
 	if (skb_warn_if_lro(skb))
 		goto drop;

@@ -215,6 +215,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
 	.fill_encap = mpls_fill_encap_info,
 	.get_encap_size = mpls_encap_nlsize,
 	.cmp_encap = mpls_encap_cmp,
+	.owner = THIS_MODULE,
 };

 static int __init mpls_iptunnel_init(void)

@@ -494,7 +494,7 @@ config NFT_CT
 	depends on NF_CONNTRACK
 	tristate "Netfilter nf_tables conntrack module"
 	help
-	  This option adds the "meta" expression that you can use to match
+	  This option adds the "ct" expression that you can use to match
 	  connection tracking information such as the flow state.

 config NFT_SET_RBTREE

@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
|
|||
static __read_mostly bool nf_conntrack_locks_all;
|
||||
|
||||
/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
|
||||
#define GC_MAX_BUCKETS_DIV 64u
|
||||
/* upper bound of scan intervals */
|
||||
#define GC_INTERVAL_MAX (2 * HZ)
|
||||
/* maximum conntracks to evict per gc run */
|
||||
#define GC_MAX_EVICTS 256u
|
||||
#define GC_MAX_BUCKETS_DIV 128u
|
||||
/* upper bound of full table scan */
|
||||
#define GC_MAX_SCAN_JIFFIES (16u * HZ)
|
||||
/* desired ratio of entries found to be expired */
|
||||
#define GC_EVICT_RATIO 50u
|
||||
|
||||
static struct conntrack_gc_work conntrack_gc_work;
|
||||
|
||||
|
@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
|
|||
|
||||
static void gc_worker(struct work_struct *work)
|
||||
{
|
||||
unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
|
||||
unsigned int i, goal, buckets = 0, expired_count = 0;
|
||||
struct conntrack_gc_work *gc_work;
|
||||
unsigned int ratio, scanned = 0;
|
||||
|
@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
|
|||
*/
|
||||
rcu_read_unlock();
|
||||
cond_resched_rcu_qs();
|
||||
} while (++buckets < goal &&
|
||||
expired_count < GC_MAX_EVICTS);
|
||||
} while (++buckets < goal);
|
||||
|
||||
if (gc_work->exiting)
|
||||
return;
|
||||
|
@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
|
|||
* 1. Minimize time until we notice a stale entry
|
||||
* 2. Maximize scan intervals to not waste cycles
|
||||
*
|
||||
* Normally, expired_count will be 0, this increases the next_run time
|
||||
* to priorize 2) above.
|
||||
* Normally, expire ratio will be close to 0.
|
||||
*
|
||||
* As soon as a timed-out entry is found, move towards 1) and increase
|
||||
* the scan frequency.
|
||||
* In case we have lots of evictions next scan is done immediately.
|
||||
* As soon as a sizeable fraction of the entries have expired
|
||||
* increase scan frequency.
|
||||
*/
|
||||
ratio = scanned ? expired_count * 100 / scanned : 0;
|
||||
if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
|
||||
gc_work->next_gc_run = 0;
|
||||
next_run = 0;
|
||||
} else if (expired_count) {
|
||||
gc_work->next_gc_run /= 2U;
|
||||
next_run = msecs_to_jiffies(1);
|
||||
if (ratio > GC_EVICT_RATIO) {
|
||||
gc_work->next_gc_run = min_interval;
|
||||
} else {
|
||||
if (gc_work->next_gc_run < GC_INTERVAL_MAX)
|
||||
gc_work->next_gc_run += msecs_to_jiffies(1);
|
||||
unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
|
||||
|
||||
next_run = gc_work->next_gc_run;
|
||||
BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
|
||||
|
||||
gc_work->next_gc_run += min_interval;
|
||||
if (gc_work->next_gc_run > max)
|
||||
gc_work->next_gc_run = max;
|
||||
}
|
||||
|
||||
next_run = gc_work->next_gc_run;
|
||||
gc_work->last_bucket = i;
|
||||
queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
|
||||
}
|
||||
|
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
 	INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
-	gc_work->next_gc_run = GC_INTERVAL_MAX;
+	gc_work->next_gc_run = HZ;
 	gc_work->exiting = false;
 }
 
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
 	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
 
 	conntrack_gc_work_init(&conntrack_gc_work);
-	queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
+	queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
 
 	return 0;
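
Taken together, the conntrack hunks above replace the old "evict at most GC_MAX_EVICTS, never wait longer than 2*HZ" policy with a ratio-driven one: gc_worker() now always finishes its bucket goal, and the next run is scheduled from the fraction of scanned entries found expired. Below is a standalone sketch of just that scheduling decision; the HZ value, the main() harness and the sample numbers are made up, only the branch structure and constants mirror the hunks.

#include <stdio.h>

#define HZ			250u	/* illustrative; the kernel value is config-dependent */
#define GC_MAX_BUCKETS_DIV	128u
#define GC_EVICT_RATIO		50u
#define GC_MAX_SCAN_JIFFIES	(16u * HZ)

static unsigned int next_gc_run = HZ;	/* initial delay, as in conntrack_gc_work_init() */

/* Decide the delay (in jiffies) before the next gc run, mirroring the
 * arithmetic in the gc_worker() hunks above. */
static unsigned int pick_next_run(unsigned int scanned, unsigned int expired_count)
{
	unsigned int min_interval = HZ / GC_MAX_BUCKETS_DIV ? HZ / GC_MAX_BUCKETS_DIV : 1u;
	unsigned int ratio = scanned ? expired_count * 100 / scanned : 0;

	if (ratio > GC_EVICT_RATIO) {
		/* many expired entries found: rescan as fast as allowed */
		next_gc_run = min_interval;
	} else {
		unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

		/* mostly-live table: back off linearly, capped so a full
		 * table scan still fits within GC_MAX_SCAN_JIFFIES */
		next_gc_run += min_interval;
		if (next_gc_run > max)
			next_gc_run = max;
	}
	return next_gc_run;
}

int main(void)
{
	printf("busy table: wait %u jiffies\n", pick_next_run(1000, 600));
	printf("idle table: wait %u jiffies\n", pick_next_run(1000, 0));
	return 0;
}

The design point of the cap: with GC_MAX_BUCKETS_DIV buckets scanned per run, a per-run delay of at most GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV bounds the time a stale entry can linger to roughly one full table scan.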
@@ -13,7 +13,6 @@
 /* Internal logging interface, which relies on the real
    LOG target modules */
 
-#define NF_LOG_PREFIXLEN		128
 #define NFLOGGER_NAME_LEN		64
 
 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
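
This lone removal only makes sense together with the nft_log_policy hunk further down, which newly references NF_LOG_PREFIXLEN in its attribute policy: the constant presumably moves out of this file into a shared netfilter logging header so both users can see it. A sketch of the presumed shared definition (the header location is an assumption, not shown in this diff):

/* assumed to land in a shared header such as include/net/netfilter/nf_log.h */
#define NF_LOG_PREFIXLEN	128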
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
 }
 
 static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
-	[NFTA_CHAIN_TABLE]	= { .type = NLA_STRING },
+	[NFTA_CHAIN_TABLE]	= { .type = NLA_STRING,
+				    .len = NFT_TABLE_MAXNAMELEN - 1 },
 	[NFTA_CHAIN_HANDLE]	= { .type = NLA_U64 },
 	[NFTA_CHAIN_NAME]	= { .type = NLA_STRING,
 				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
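
This and the remaining nla_policy hunks in the series all apply one hardening pattern: an NLA_STRING entry without a .len bound accepts a string of any length from userspace, while .len caps the payload during attribute validation, before any handler touches it. The bound is MAXNAMELEN - 1 because the kernel-side buffers reserve one byte for the NUL terminator. A minimal sketch of the pattern, assuming kernel context (net/netlink.h plus the nf_tables headers); the array name is illustrative, not copied from any one hunk:

/* NFT_TABLE_MAXNAMELEN counts the terminating NUL, so the longest
 * attribute payload this policy accepts is one byte shorter. */
static const struct nla_policy example_policy[NFTA_CHAIN_MAX + 1] = {
	[NFTA_CHAIN_TABLE]	= { .type = NLA_STRING,
				    .len  = NFT_TABLE_MAXNAMELEN - 1 },
};

With the bound in place, over-long names are rejected up front by netlink attribute validation (typically as -ERANGE) instead of being truncated or copied unchecked later.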
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
 }
 
 static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
-	[NFTA_RULE_TABLE]	= { .type = NLA_STRING },
+	[NFTA_RULE_TABLE]	= { .type = NLA_STRING,
+				    .len = NFT_TABLE_MAXNAMELEN - 1 },
 	[NFTA_RULE_CHAIN]	= { .type = NLA_STRING,
 				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
 	[NFTA_RULE_HANDLE]	= { .type = NLA_U64 },
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
 }
 
 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
-	[NFTA_SET_TABLE]	= { .type = NLA_STRING },
+	[NFTA_SET_TABLE]	= { .type = NLA_STRING,
+				    .len = NFT_TABLE_MAXNAMELEN - 1 },
 	[NFTA_SET_NAME]		= { .type = NLA_STRING,
 				    .len = NFT_SET_MAXNAMELEN - 1 },
 	[NFTA_SET_FLAGS]	= { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
 }
 
 static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
-					const struct nft_set *set,
+					struct nft_set *set,
 					const struct nft_set_iter *iter,
-					const struct nft_set_elem *elem)
+					struct nft_set_elem *elem)
 {
 	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
 	enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
 };
 
 static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
-	[NFTA_SET_ELEM_LIST_TABLE]	= { .type = NLA_STRING },
-	[NFTA_SET_ELEM_LIST_SET]	= { .type = NLA_STRING },
+	[NFTA_SET_ELEM_LIST_TABLE]	= { .type = NLA_STRING,
+					    .len = NFT_TABLE_MAXNAMELEN - 1 },
+	[NFTA_SET_ELEM_LIST_SET]	= { .type = NLA_STRING,
+					    .len = NFT_SET_MAXNAMELEN - 1 },
 	[NFTA_SET_ELEM_LIST_ELEMENTS]	= { .type = NLA_NESTED },
 	[NFTA_SET_ELEM_LIST_SET_ID]	= { .type = NLA_U32 },
 };
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
 };
 
 static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
-				  const struct nft_set *set,
+				  struct nft_set *set,
 				  const struct nft_set_iter *iter,
-				  const struct nft_set_elem *elem)
+				  struct nft_set_elem *elem)
 {
 	struct nft_set_dump_args *args;
 
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
 	u8 genmask = nft_genmask_cur(net);
-	const struct nft_set *set;
+	struct nft_set *set;
 	struct nft_set_dump_args args;
 	struct nft_ctx ctx;
 	struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 		goto err5;
 	}
 
+	if (set->size &&
+	    !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+		err = -ENFILE;
+		goto err6;
+	}
+
 	nft_trans_elem(trans) = elem;
 	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 	return 0;
 
+err6:
+	set->ops->remove(set, &elem);
 err5:
 	kfree(trans);
 err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
 		return -EBUSY;
 
 	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
-		if (set->size &&
-		    !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
-			return -ENFILE;
-
 		err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
-		if (err < 0) {
-			atomic_dec(&set->nelems);
+		if (err < 0)
 			break;
-		}
 	}
 	return err;
 }
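
The two hunks above move the set-size accounting from the loop in nf_tables_newsetelem() into nft_add_set_elem() itself, after the element has actually been inserted, with err6 unwinding the insert if the set turns out to be full. The apparent point: when an element already exists and NLM_F_EXCL is not given, the request succeeds without adding anything, yet the old code had already charged a slot, slowly leaking capacity. The counting primitive is atomic_add_unless(), i.e. "increment unless already at the cap". A userspace sketch of that primitive and the resulting -ENFILE behaviour, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's atomic_add_unless(v, 1, cap): increment v
 * unless it already equals cap; false means "set is full". */
static bool add_unless(atomic_uint *v, unsigned int cap)
{
	unsigned int c = atomic_load(v);

	while (c != cap) {
		if (atomic_compare_exchange_weak(v, &c, c + 1))
			return true;	/* slot reserved for the new element */
	}
	return false;			/* caller reports -ENFILE */
}

int main(void)
{
	atomic_uint nelems = 0;
	unsigned int size = 2, ndeact = 0;	/* a two-element set, nothing pending */

	for (int i = 0; i < 3; i++)
		printf("insert %d: %s\n", i,
		       add_unless(&nelems, size + ndeact) ? "ok" : "ENFILE");
	return 0;
}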
@@ -3883,9 +3890,9 @@ err1:
 }
 
 static int nft_flush_set(const struct nft_ctx *ctx,
-			 const struct nft_set *set,
+			 struct nft_set *set,
 			 const struct nft_set_iter *iter,
-			 const struct nft_set_elem *elem)
+			 struct nft_set_elem *elem)
 {
 	struct nft_trans *trans;
 	int err;
|
|||
err = -ENOENT;
|
||||
goto err1;
|
||||
}
|
||||
set->ndeact++;
|
||||
|
||||
nft_trans_elem_set(trans) = (struct nft_set *)set;
|
||||
nft_trans_elem(trans) = *((struct nft_set_elem *)elem);
|
||||
nft_trans_elem_set(trans) = set;
|
||||
nft_trans_elem(trans) = *elem;
|
||||
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
|
||||
|
||||
return 0;
|
||||
|
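
The set->ndeact++ above is the other half of that accounting: elements flushed in a still-uncommitted transaction are deactivated rather than gone, and the capacity test in nft_add_set_elem() (set->size + set->ndeact) deliberately counts them as free slots. Reduced to plain arithmetic (a sketch; the field names follow the hunks, the helper itself is hypothetical):

/* Hypothetical helper mirroring atomic_add_unless(&set->nelems, 1,
 * set->size + set->ndeact): an insert fits while live elements stay
 * below size plus the slots pending release. */
static inline int room_for_insert(unsigned int nelems,
				  unsigned int size,
				  unsigned int ndeact)
{
	return nelems < size + ndeact;
}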
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
 EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
 
 static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
-	[NFTA_OBJ_TABLE]	= { .type = NLA_STRING },
-	[NFTA_OBJ_NAME]		= { .type = NLA_STRING },
+	[NFTA_OBJ_TABLE]	= { .type = NLA_STRING,
+				    .len = NFT_TABLE_MAXNAMELEN - 1 },
+	[NFTA_OBJ_NAME]		= { .type = NLA_STRING,
+				    .len = NFT_OBJ_MAXNAMELEN - 1 },
 	[NFTA_OBJ_TYPE]		= { .type = NLA_U32 },
 	[NFTA_OBJ_DATA]		= { .type = NLA_NESTED },
 };
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
 			if (idx > s_idx)
 				memset(&cb->args[1], 0,
 				       sizeof(cb->args) - sizeof(cb->args[0]));
-			if (filter->table[0] &&
+			if (filter && filter->table[0] &&
 			    strcmp(filter->table, table->name))
 				goto cont;
-			if (filter->type != NFT_OBJECT_UNSPEC &&
+			if (filter &&
+			    filter->type != NFT_OBJECT_UNSPEC &&
 			    obj->type->type != filter->type)
 				goto cont;
 
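
The guards added above handle dumps issued without any filter attributes, in which case the callback's filter pointer is NULL and the old code dereferenced it unconditionally. A self-contained sketch of the fixed matching logic (struct layout and sizes are illustrative, not the kernel's):

#include <string.h>

struct obj_filter {
	char table[32];		/* illustrative size */
	unsigned int type;	/* 0 plays the role of NFT_OBJECT_UNSPEC here */
};

/* A NULL filter means "no restriction": match everything. */
static int obj_matches(const struct obj_filter *filter,
		       const char *table_name, unsigned int obj_type)
{
	if (filter && filter->table[0] && strcmp(filter->table, table_name))
		return 0;
	if (filter && filter->type && filter->type != obj_type)
		return 0;
	return 1;
}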
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
 					const struct nft_chain *chain);
 
 static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
-					const struct nft_set *set,
+					struct nft_set *set,
 					const struct nft_set_iter *iter,
-					const struct nft_set_elem *elem)
+					struct nft_set_elem *elem)
 {
 	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
 	const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
 {
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
-	const struct nft_set *set;
+	struct nft_set *set;
 	struct nft_set_binding *binding;
 	struct nft_set_iter iter;
@@ -98,7 +98,8 @@ out:
 }
 
 static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
-	[NFTA_DYNSET_SET_NAME]	= { .type = NLA_STRING },
+	[NFTA_DYNSET_SET_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_SET_MAXNAMELEN - 1 },
 	[NFTA_DYNSET_SET_ID]	= { .type = NLA_U32 },
 	[NFTA_DYNSET_OP]	= { .type = NLA_U32 },
 	[NFTA_DYNSET_SREG_KEY]	= { .type = NLA_U32 },
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
 
 static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
 	[NFTA_LOG_GROUP]	= { .type = NLA_U16 },
-	[NFTA_LOG_PREFIX]	= { .type = NLA_STRING },
+	[NFTA_LOG_PREFIX]	= { .type = NLA_STRING,
+				    .len = NF_LOG_PREFIXLEN - 1 },
 	[NFTA_LOG_SNAPLEN]	= { .type = NLA_U32 },
 	[NFTA_LOG_QTHRESHOLD]	= { .type = NLA_U16 },
 	[NFTA_LOG_LEVEL]	= { .type = NLA_U32 },
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
 }
 
 static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
-	[NFTA_LOOKUP_SET]	= { .type = NLA_STRING },
+	[NFTA_LOOKUP_SET]	= { .type = NLA_STRING,
+				    .len = NFT_SET_MAXNAMELEN - 1 },
 	[NFTA_LOOKUP_SET_ID]	= { .type = NLA_U32 },
 	[NFTA_LOOKUP_SREG]	= { .type = NLA_U32 },
 	[NFTA_LOOKUP_DREG]	= { .type = NLA_U32 },
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
 }
 
 static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
-	[NFTA_OBJREF_IMM_NAME]	= { .type = NLA_STRING },
+	[NFTA_OBJREF_IMM_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_OBJ_MAXNAMELEN - 1 },
 	[NFTA_OBJREF_IMM_TYPE]	= { .type = NLA_U32 },
 	[NFTA_OBJREF_SET_SREG]	= { .type = NLA_U32 },
-	[NFTA_OBJREF_SET_NAME]	= { .type = NLA_STRING },
+	[NFTA_OBJREF_SET_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_SET_MAXNAMELEN - 1 },
 	[NFTA_OBJREF_SET_ID]	= { .type = NLA_U32 },
 };
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
 	rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
 }
 
-static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 			  struct nft_set_iter *iter)
 {
 	struct nft_hash *priv = nft_set_priv(set);
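
The signature changes in this and the earlier set-walking hunks are one series: the iterator callbacks used to receive const set and element pointers, forcing mutators like nft_flush_set() into the const-discarding casts removed earlier in this diff; dropping the const lets those callbacks update accounting fields directly. A compilable sketch of the callback shape before and after (the typedef names are mine, only the parameter shapes match the hunks):

struct nft_ctx;
struct nft_set;
struct nft_set_iter;
struct nft_set_elem;

/* before: mutating callbacks had to cast the const away */
typedef int (*nft_set_walk_cb_old)(const struct nft_ctx *ctx,
				   const struct nft_set *set,
				   const struct nft_set_iter *iter,
				   const struct nft_set_elem *elem);

/* after: set and elem are mutable, so e.g. set->ndeact++ needs no cast */
typedef int (*nft_set_walk_cb_new)(const struct nft_ctx *ctx,
				   struct nft_set *set,
				   const struct nft_set_iter *iter,
				   struct nft_set_elem *elem);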