Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix TCP checksum offload handling in iwlwifi driver, from Emmanuel
    Grumbach.

 2) In ksz DSA tagging code, free SKB if skb_put_padto() fails.  From
    Vivien Didelot.

 3) Fix two regressions with bonding on wireless, from Andreas Born.

 4) Fix build when busypoll is disabled, from Daniel Borkmann.

 5) Fix copy_linear_skb() wrt. SO_PEEK_OFF, from Eric Dumazet.

 6) Set SKB cached route properly in inet_rtm_getroute(), from Florian
    Westphal.

 7) Fix PCI-E relaxed ordering handling in cxgb4 driver, from Ding
    Tianhong.

 8) Fix module refcnt leak in ULP code, from Sabrina Dubroca.

 9) Fix use of GFP_KERNEL in atomic contexts in AF_KEY code, from Eric
    Dumazet.

10) Need to purge socket write queue in dccp_destroy_sock(), also from
    Eric Dumazet.

11) Make bpf_trace_printk() work properly on 32-bit architectures, from
    Daniel Borkmann.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
  bpf: fix bpf_trace_printk on 32 bit archs
  PCI: fix oops when try to find Root Port for a PCI device
  sfc: don't try and read ef10 data on non-ef10 NIC
  net_sched: remove warning from qdisc_hash_add
  net_sched/sfq: update hierarchical backlog when drop packet
  net_sched: reset pointers to tcf blocks in classful qdiscs' destructors
  ipv4: fix NULL dereference in free_fib_info_rcu()
  net: Fix a typo in comment about sock flags.
  ipv6: fix NULL dereference in ip6_route_dev_notify()
  tcp: fix possible deadlock in TCP stack vs BPF filter
  dccp: purge write queue in dccp_destroy_sock()
  udp: fix linear skb reception with PEEK_OFF
  ipv6: release rt6->rt6i_idev properly during ifdown
  af_key: do not use GFP_KERNEL in atomic contexts
  tcp: ulp: avoid module refcnt leak in tcp_set_ulp
  net/cxgb4vf: Use new PCI_DEV_FLAGS_NO_RELAXED_ORDERING flag
  net/cxgb4: Use new PCI_DEV_FLAGS_NO_RELAXED_ORDERING flag
  PCI: Disable Relaxed Ordering Attributes for AMD A1100
  PCI: Disable Relaxed Ordering for some Intel processors
  PCI: Disable PCIe Relaxed Ordering if unsupported
  ...
commit 510c8a899c
@@ -26,7 +26,7 @@
 
 #define FSM_TIMER_DEBUG 0
 
-void
+int
 mISDN_FsmNew(struct Fsm *fsm,
 	     struct FsmNode *fnlist, int fncount)
 {
@@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm,
 	fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
 				  fsm->event_count, GFP_KERNEL);
+	if (fsm->jumpmatrix == NULL)
+		return -ENOMEM;
 
 	for (i = 0; i < fncount; i++)
 		if ((fnlist[i].state >= fsm->state_count) ||
@@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm,
 		} else
 			fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
 					fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+	return 0;
 }
 EXPORT_SYMBOL(mISDN_FsmNew);
 
@@ -55,7 +55,7 @@ struct FsmTimer {
 	void *arg;
 };
 
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int  mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
 extern void mISDN_FsmFree(struct Fsm *);
 extern int  mISDN_FsmEvent(struct FsmInst *, int , void *);
 extern void mISDN_FsmChangeState(struct FsmInst *, int);
@@ -414,8 +414,7 @@ l1_init(u_int *deb)
 	l1fsm_s.event_count = L1_EVENT_COUNT;
 	l1fsm_s.strEvent = strL1Event;
 	l1fsm_s.strState = strL1SState;
-	mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
-	return 0;
+	return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
 }
 
 void
@@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = {
 int
 Isdnl2_Init(u_int *deb)
 {
+	int res;
 	debug = deb;
 	mISDN_register_Bprotocol(&X75SLP);
 	l2fsm.state_count = L2_STATE_COUNT;
 	l2fsm.event_count = L2_EVENT_COUNT;
 	l2fsm.strEvent = strL2Event;
 	l2fsm.strState = strL2State;
-	mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
-	TEIInit(deb);
+	res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+	if (res)
+		goto error;
+	res = TEIInit(deb);
+	if (res)
+		goto error_fsm;
 	return 0;
+
+error_fsm:
+	mISDN_FsmFree(&l2fsm);
+error:
+	mISDN_unregister_Bprotocol(&X75SLP);
+	return res;
 }
 
 void
@@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev)
 
 int TEIInit(u_int *deb)
 {
+	int res;
 	debug = deb;
 	teifsmu.state_count = TEI_STATE_COUNT;
 	teifsmu.event_count = TEI_EVENT_COUNT;
 	teifsmu.strEvent = strTeiEvent;
 	teifsmu.strState = strTeiState;
-	mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+	res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+	if (res)
+		goto error;
 	teifsmn.state_count = TEI_STATE_COUNT;
 	teifsmn.event_count = TEI_EVENT_COUNT;
 	teifsmn.strEvent = strTeiEvent;
 	teifsmn.strState = strTeiState;
-	mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+	res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+	if (res)
+		goto error_smn;
 	deactfsm.state_count = DEACT_STATE_COUNT;
 	deactfsm.event_count = DEACT_EVENT_COUNT;
 	deactfsm.strEvent = strDeactEvent;
 	deactfsm.strState = strDeactState;
-	mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+	res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+	if (res)
+		goto error_deact;
 	return 0;
+
+error_deact:
+	mISDN_FsmFree(&teifsmn);
+error_smn:
+	mISDN_FsmFree(&teifsmu);
+error:
+	return res;
 }
 
 void TEIFree(void)
@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	new_slave->delay = 0;
 	new_slave->link_failure_count = 0;
 
-	if (bond_update_speed_duplex(new_slave))
+	if (bond_update_speed_duplex(new_slave) &&
+	    bond_needs_speed_duplex(bond))
 		new_slave->link = BOND_LINK_DOWN;
 
 	new_slave->last_rx = jiffies -
@@ -2140,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond)
 			continue;
 
 		case BOND_LINK_UP:
-			if (bond_update_speed_duplex(slave)) {
+			if (bond_update_speed_duplex(slave) &&
+			    bond_needs_speed_duplex(bond)) {
 				slave->link = BOND_LINK_DOWN;
-				netdev_warn(bond->dev,
-					    "failed to get link speed/duplex for %s\n",
-					    slave->dev->name);
+				if (net_ratelimit())
+					netdev_warn(bond->dev,
+						    "failed to get link speed/duplex for %s\n",
+						    slave->dev->name);
 				continue;
 			}
 			bond_set_slave_link_state(slave, BOND_LINK_UP,
@@ -529,6 +529,7 @@ enum { /* adapter flags */
 	USING_SOFT_PARAMS = (1 << 6),
 	MASTER_PF         = (1 << 7),
 	FW_OFLD_CONN      = (1 << 9),
+	ROOT_NO_RELAXED_ORDERING = (1 << 10),
 };
 
 enum {
@@ -4654,11 +4654,6 @@ static void print_port_info(const struct net_device *dev)
 		    dev->name, adap->params.vpd.id, adap->name, buf);
 }
 
-static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
-{
-	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
-}
-
 /*
  * Free the following resources:
  * - memory used for tables
@@ -4908,7 +4903,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	pci_enable_pcie_error_reporting(pdev);
-	enable_pcie_relaxed_ordering(pdev);
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
@@ -4947,6 +4941,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = DFLT_MSG_ENABLE;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
+	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+	 * Ingress Packet Data to Free List Buffers in order to allow for
+	 * chipset performance optimizations between the Root Complex and
+	 * Memory Controllers. (Messages to the associated Ingress Queue
+	 * notifying new Packet Placement in the Free Lists Buffers will be
+	 * send without the Relaxed Ordering Attribute thus guaranteeing that
+	 * all preceding PCIe Transaction Layer Packets will be processed
+	 * first.) But some Root Complexes have various issues with Upstream
+	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+	 * The PCIe devices which under the Root Complexes will be cleared the
+	 * Relaxed Ordering bit in the configuration space, So we check our
+	 * PCIe configuration space to see if it's flagged with advice against
+	 * using Relaxed Ordering.
+	 */
+	if (!pcie_relaxed_ordering_enabled(pdev))
+		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->tid_release_lock);
 	spin_lock_init(&adapter->win0_lock);
@@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	struct fw_iq_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
+	int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
 
 	/* Size needs to be multiple of 16, including status entry. */
 	iq->size = roundup(iq->size, 16);
@@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 
 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
 		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
-					     FW_IQ_CMD_FL0FETCHRO_F |
-					     FW_IQ_CMD_FL0DATARO_F |
+					     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+					     FW_IQ_CMD_FL0DATARO_V(relaxed) |
					     FW_IQ_CMD_FL0PADEN_F);
 		if (cong >= 0)
 			c.iqns_to_fl0congen |=
@@ -408,6 +408,7 @@ enum { /* adapter flags */
 	USING_MSI    = (1UL << 1),
 	USING_MSIX   = (1UL << 2),
 	QUEUES_BOUND = (1UL << 3),
+	ROOT_NO_RELAXED_ORDERING = (1UL << 4),
 };
 
 /*
@@ -2888,6 +2888,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 */
 	adapter->name = pci_name(pdev);
 	adapter->msg_enable = DFLT_MSG_ENABLE;
+
+	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+	 * Ingress Packet Data to Free List Buffers in order to allow for
+	 * chipset performance optimizations between the Root Complex and
+	 * Memory Controllers. (Messages to the associated Ingress Queue
+	 * notifying new Packet Placement in the Free Lists Buffers will be
+	 * send without the Relaxed Ordering Attribute thus guaranteeing that
+	 * all preceding PCIe Transaction Layer Packets will be processed
+	 * first.) But some Root Complexes have various issues with Upstream
+	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+	 * The PCIe devices which under the Root Complexes will be cleared the
+	 * Relaxed Ordering bit in the configuration space, So we check our
+	 * PCIe configuration space to see if it's flagged with advice against
+	 * using Relaxed Ordering.
+	 */
+	if (!pcie_relaxed_ordering_enabled(pdev))
+		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
 	err = adap_init0(adapter);
 	if (err)
 		goto err_unmap_bar;
 
@@ -2205,6 +2205,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 	struct port_info *pi = netdev_priv(dev);
 	struct fw_iq_cmd cmd, rpl;
 	int ret, iqandst, flsz = 0;
+	int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
 
 	/*
 	 * If we're using MSI interrupts and we're not initializing the
@@ -2300,6 +2301,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
			cpu_to_be32(
				FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
				FW_IQ_CMD_FL0PACKEN_F |
+				FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+				FW_IQ_CMD_FL0DATARO_V(relaxed) |
				FW_IQ_CMD_FL0PADEN_F);
 
	/* In T6, for egress queue type FL there is internal overhead
@@ -115,14 +115,10 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 		return;
 	}
 
-	if (link) {
+	if (link)
 		netif_carrier_on(netdev);
-		rtnl_lock();
-		dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
-		rtnl_unlock();
-	} else {
+	else
 		netif_carrier_off(netdev);
-	}
 	rcu_read_unlock();
 }
 
@@ -938,7 +938,6 @@ enum efx_stats_action {
 static int efx_mcdi_mac_stats(struct efx_nic *efx,
			      enum efx_stats_action action, int clear)
 {
-	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
 	int rc;
 	int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -960,7 +959,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
			      MAC_STATS_IN_PERIODIC_NOEVENT, 1,
			      MAC_STATS_IN_PERIOD_MS, period);
 	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+		MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+	}
 
 	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
				NULL, 0, NULL);
@@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_device *ndev)
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
 	struct device_node *mdio_node = priv->plat->mdio_node;
+	struct device *dev = ndev->dev.parent;
 	int addr, found;
 
 	if (!mdio_bus_data)
@@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_device *ndev)
	else
 		err = mdiobus_register(new_bus);
 	if (err != 0) {
-		netdev_err(ndev, "Cannot register the MDIO bus\n");
+		dev_err(dev, "Cannot register the MDIO bus\n");
 		goto bus_register_fail;
 	}
 
@@ -285,14 +286,12 @@ int stmmac_mdio_register(struct net_device *ndev)
			irq_str = irq_num;
			break;
		}
-		netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
-			    phydev->phy_id, addr, irq_str, phydev_name(phydev),
-			    act ? " active" : "");
+		phy_attached_info(phydev);
 		found = 1;
 	}
 
 	if (!found && !mdio_node) {
-		netdev_warn(ndev, "No PHY found\n");
+		dev_warn(dev, "No PHY found\n");
 		mdiobus_unregister(new_bus);
 		mdiobus_free(new_bus);
 		return -ENODEV;
@@ -159,8 +159,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
 
 	brcmf_feat_firmware_capabilities(ifp);
 	memset(&gscan_cfg, 0, sizeof(gscan_cfg));
-	brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg",
-				  &gscan_cfg, sizeof(gscan_cfg));
+	if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
+		brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
+					  "pfn_gscan_cfg",
+					  &gscan_cfg, sizeof(gscan_cfg));
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
 	if (drvr->bus_if->wowl_supported)
 		brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
@@ -154,7 +154,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 const struct iwl_cfg iwl9160_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9160",
 	.fw_name_pre = IWL9260A_FW_PRE,
-	.fw_name_pre_next_step = IWL9260B_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -165,7 +165,7 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
 const struct iwl_cfg iwl9260_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9260",
 	.fw_name_pre = IWL9260A_FW_PRE,
-	.fw_name_pre_next_step = IWL9260B_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -176,7 +176,7 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
 const struct iwl_cfg iwl9270_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9270",
 	.fw_name_pre = IWL9260A_FW_PRE,
-	.fw_name_pre_next_step = IWL9260B_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -186,8 +186,8 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
 
 const struct iwl_cfg iwl9460_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9460",
-	.fw_name_pre = IWL9000_FW_PRE,
-	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -198,8 +198,8 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
 
 const struct iwl_cfg iwl9560_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9560",
-	.fw_name_pre = IWL9000_FW_PRE,
-	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -328,6 +328,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
 *	command size (command version 4) that supports toggling ACK TX
 *	power reduction.
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */
@@ -373,6 +374,7 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
 	IWL_UCODE_TLV_CAPA_LQM_SUPPORT           = (__force iwl_ucode_tlv_capa_t)81,
 	IWL_UCODE_TLV_CAPA_TX_POWER_ACK          = (__force iwl_ucode_tlv_capa_t)84,
+	IWL_UCODE_TLV_CAPA_MLME_OFFLOAD          = (__force iwl_ucode_tlv_capa_t)96,
 
 	NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
@@ -276,10 +276,10 @@ struct iwl_pwr_tx_backoff {
 * @fw_name_pre: Firmware filename prefix. The api version and extension
 *	(.ucode) will be added to filename before loading from disk. The
 *	filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
- *	step. Supported only in integrated solutions.
+ * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
+ *	(if supported)
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+ *	next step. Supported only in integrated solutions.
 * @ucode_api_max: Highest version of uCode API supported by driver.
 * @ucode_api_min: Lowest version of uCode API supported by driver.
 * @max_inst_size: The maximal length of the fw inst section
@@ -330,7 +330,7 @@ struct iwl_cfg {
 	/* params specific to an individual device within a device family */
 	const char *name;
 	const char *fw_name_pre;
-	const char *fw_name_pre_next_step;
+	const char *fw_name_pre_b_or_c_step;
 	const char *fw_name_pre_rf_next_step;
 	/* params not likely to change within a device family */
 	const struct iwl_base_params *base_params;
@@ -216,8 +216,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 	const char *fw_pre_name;
 
 	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
-	    CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
-		fw_pre_name = cfg->fw_name_pre_next_step;
+	    (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+	     CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+		fw_pre_name = cfg->fw_name_pre_b_or_c_step;
 	else if (drv->trans->cfg->integrated &&
 		 CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
 		 cfg->fw_name_pre_rf_next_step)
@@ -785,7 +785,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
		       int num_of_ch, __le32 *channels, u16 fw_mcc)
 {
 	int ch_idx;
-	u16 ch_flags, prev_ch_flags = 0;
+	u16 ch_flags;
+	u32 reg_rule_flags, prev_reg_rule_flags = 0;
 	const u8 *nvm_chan = cfg->ext_nvm ?
			     iwl_ext_nvm_channels : iwl_nvm_channels;
 	struct ieee80211_regdomain *regd;
@@ -834,8 +835,11 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
			continue;
		}
 
+		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+							     ch_flags, cfg);
+
 		/* we can't continue the same rule */
-		if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
		    center_freq - prev_center_freq > 20) {
			valid_rules++;
			new_rule = true;
@@ -854,18 +858,17 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
		rule->power_rule.max_eirp =
			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
 
-		rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-							  ch_flags, cfg);
+		rule->flags = reg_rule_flags;
 
		/* rely on auto-calculation to merge BW of contiguous chans */
		rule->flags |= NL80211_RRF_AUTO_BW;
		rule->freq_range.max_bandwidth_khz = 0;
 
-		prev_ch_flags = ch_flags;
		prev_center_freq = center_freq;
+		prev_reg_rule_flags = reg_rule_flags;
 
		IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-			      "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+			      "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
			      center_freq,
			      band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
			      CHECK_AND_PRINT_I(VALID),
@@ -877,10 +880,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
			      CHECK_AND_PRINT_I(160MHZ),
			      CHECK_AND_PRINT_I(INDOOR_ONLY),
			      CHECK_AND_PRINT_I(GO_CONCURRENT),
-			      ch_flags,
+			      ch_flags, reg_rule_flags,
			      ((ch_flags & NVM_CHANNEL_ACTIVE) &&
			       !(ch_flags & NVM_CHANNEL_RADAR))
-			      ? "" : "not ");
+			      ? "Ad-Hoc" : "");
 	}
 
 	regd->n_reg_rules = valid_rules;
@@ -1275,8 +1275,10 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
 
			entry = &wifi_pkg->package.elements[idx++];
			if ((entry->type != ACPI_TYPE_INTEGER) ||
-			    (entry->integer.value > U8_MAX))
-				return -EINVAL;
+			    (entry->integer.value > U8_MAX)) {
+				ret = -EINVAL;
+				goto out_free;
+			}
 
			mvm->geo_profiles[i].values[j] = entry->integer.value;
		}
@@ -2597,8 +2597,18 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
 	spin_lock_bh(&mvm_sta->lock);
 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		tid_data = &mvm_sta->tid_data[i];
-		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
-			ieee80211_free_txskb(mvm->hw, skb);
+
+		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+			/*
+			 * The first deferred frame should've stopped the MAC
+			 * queues, so we should never get a second deferred
+			 * frame for the RA/TID.
+			 */
+			iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+			ieee80211_free_txskb(mvm->hw, skb);
+		}
 	}
 	spin_unlock_bh(&mvm_sta->lock);
 }
@@ -1291,7 +1291,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
	 * first index into rate scale table.
	 */
	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
				    info->status.ampdu_len,
				    info->status.ampdu_ack_len,
				    reduced_txp);
@@ -1312,7 +1312,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		if (info->status.ampdu_ack_len == 0)
			info->status.ampdu_len = 1;
 
-		rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+		rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
				    info->status.ampdu_len,
				    info->status.ampdu_ack_len);
 
@@ -1348,11 +1348,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			continue;
 
		rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
-				    lq_rate.index, 1,
+				    tx_resp_rate.index, 1,
				    i < retries ? 0 : legacy_success,
				    reduced_txp);
		rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
-				    lq_rate.index, 1,
+				    tx_resp_rate.index, 1,
				    i < retries ? 0 : legacy_success);
	}
 
@@ -636,9 +636,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 
	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (!baid_data) {
-		WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
-		     "Received baid %d, but no data exists for this BAID\n",
-		     baid);
+		IWL_DEBUG_RX(mvm,
+			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+			     baid, reorder);
		return false;
	}
 
@@ -759,7 +759,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
 
	data = rcu_dereference(mvm->baid_map[baid]);
	if (!data) {
-		WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
+		IWL_DEBUG_RX(mvm,
+			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+			     baid, reorder_data);
		goto out;
	}
 
@@ -121,7 +121,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
-						 STA_FLG_MIMO_EN_MSK),
+						 STA_FLG_MIMO_EN_MSK |
+						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
@@ -290,8 +291,8 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
		goto unlock;
 
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
-					  sta->addr, ba_data->tid);
+	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+				      sta->addr, ba_data->tid);
 unlock:
	rcu_read_unlock();
 }
@@ -185,8 +185,14 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
	else
		udp_hdr(skb)->check = 0;
 
-	/* mac header len should include IV, size is in words */
-	if (info->control.hw_key)
+	/*
+	 * mac header len should include IV, size is in words unless
+	 * the IV is added by the firmware like in WEP.
+	 * In new Tx API, the IV is always added by the firmware.
+	 */
+	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
@@ -1815,6 +1821,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
	struct iwl_mvm_tid_data *tid_data;
	struct iwl_mvm_sta *mvmsta;
 
+	ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_mvm_compressed_ba_notif *ba_res =
			(void *)pkt->data;
@@ -510,9 +510,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
 /* 9000 Series */
	{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
	{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
	{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
@@ -527,10 +535,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
	{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
	{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
	{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
	{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
@@ -522,10 +522,11 @@ struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
		bridge = pci_upstream_bridge(bridge);
	}
 
-	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
-		return NULL;
+	if (highest_pcie_bridge &&
+	    pci_pcie_type(highest_pcie_bridge) == PCI_EXP_TYPE_ROOT_PORT)
+		return highest_pcie_bridge;
 
-	return highest_pcie_bridge;
+	return NULL;
 }
 EXPORT_SYMBOL(pci_find_pcie_root_port);
 
@@ -1762,6 +1762,48 @@ static void pci_configure_extended_tags(struct pci_dev *dev)
					 PCI_EXP_DEVCTL_EXT_TAG);
 }
 
+/**
+ * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
+ * @dev: PCI device to query
+ *
+ * Returns true if the device has enabled relaxed ordering attribute.
+ */
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
+{
+	u16 v;
+
+	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
+
+	return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
+}
+EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
+
+static void pci_configure_relaxed_ordering(struct pci_dev *dev)
+{
+	struct pci_dev *root;
+
+	/* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
+	if (dev->is_virtfn)
+		return;
+
+	if (!pcie_relaxed_ordering_enabled(dev))
+		return;
+
+	/*
+	 * For now, we only deal with Relaxed Ordering issues with Root
+	 * Ports. Peer-to-Peer DMA is another can of worms.
+	 */
+	root = pci_find_pcie_root_port(dev);
+	if (!root)
+		return;
+
+	if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
+		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+					   PCI_EXP_DEVCTL_RELAX_EN);
+		dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
+	}
+}
+
 static void pci_configure_device(struct pci_dev *dev)
 {
	struct hotplug_params hpp;
@@ -1769,6 +1811,7 @@ static void pci_configure_device(struct pci_dev *dev)
 
	pci_configure_mps(dev);
	pci_configure_extended_tags(dev);
+	pci_configure_relaxed_ordering(dev);
 
	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
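
A usage note on the probe.c additions above: the new helper gives endpoint
drivers one query point, since the PCI core has already cleared the device's
DEVCTL Relaxed Ordering enable bit when its Root Port is quirked. A minimal
sketch of driver-side use (hypothetical function name, mirroring the
cxgb4/cxgb4vf hunks elsewhere in this merge, not code from the patch):

	#include <linux/pci.h>

	/* Sketch only: the helper's answer already reflects any Root Port
	 * quirk applied at enumeration time, so a driver can key its DMA
	 * attribute choices off this single call at probe time.
	 */
	static bool my_dev_may_use_relaxed_ordering(struct pci_dev *pdev)
	{
		return pcie_relaxed_ordering_enabled(pdev);
	}
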
@@ -4015,6 +4015,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
 DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_tw686x_class);
 
+/*
+ * Some devices have problems with Transaction Layer Packets with the Relaxed
+ * Ordering Attribute set. Such devices should mark themselves and other
+ * Device Drivers should check before sending TLPs with RO set.
+ */
+static void quirk_relaxedordering_disable(struct pci_dev *dev)
+{
+	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
+	dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
+}
+
+/*
+ * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
+ * Complex has a Flow Control Credit issue which can cause performance
+ * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+
+/*
+ * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * where Upstream Transaction Layer Packets with the Relaxed Ordering
+ * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
+ * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
+ * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
+ * November 10, 2010). As a result, on this platform we can't use Relaxed
+ * Ordering for Upstream TLPs.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+
 /*
  * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
  * values for the Attribute as were supplied in the header of the
@@ -37,7 +37,7 @@ struct net;
 
 /* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
 * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
- * Eventually all flags will be in sk->sk_wq_flags.
+ * Eventually all flags will be in sk->sk_wq->flags.
 */
 #define SOCKWQ_ASYNC_NOSPACE	0
 #define SOCKWQ_ASYNC_WAITDATA	1
@@ -188,6 +188,8 @@ enum pci_dev_flags {
	 * the direct_complete optimization.
	 */
	PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
+	/* Don't use Relaxed Ordering for TLPs directed at this device */
+	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
 };
 
 enum pci_irq_reroute_variant {
@@ -1126,6 +1128,7 @@ bool pci_check_pme_status(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
 void pci_d3cold_enable(struct pci_dev *dev);
 void pci_d3cold_disable(struct pci_dev *dev);
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
 
 /* PCI Virtual Channel */
 int pci_save_vc_state(struct pci_dev *dev);
@@ -336,6 +336,16 @@ static inline void in6_dev_put(struct inet6_dev *idev)
		in6_dev_finish_destroy(idev);
 }
 
+static inline void in6_dev_put_clear(struct inet6_dev **pidev)
+{
+	struct inet6_dev *idev = *pidev;
+
+	if (idev) {
+		in6_dev_put(idev);
+		*pidev = NULL;
+	}
+}
+
 static inline void __in6_dev_put(struct inet6_dev *idev)
 {
	refcount_dec(&idev->refcnt);
@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
	       BOND_MODE(bond) == BOND_MODE_ALB;
 }
 
+static inline bool bond_needs_speed_duplex(const struct bonding *bond)
+{
+	return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
+}
+
 static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
 {
	return (BOND_MODE(bond) == BOND_MODE_TLB) &&
@@ -29,18 +29,18 @@
 #include <linux/sched/signal.h>
 #include <net/ip.h>
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-struct napi_struct;
-extern unsigned int sysctl_net_busy_read __read_mostly;
-extern unsigned int sysctl_net_busy_poll __read_mostly;
-
 /*		0 - Reserved to indicate value not set
 *     1..NR_CPUS - Reserved for sender_cpu
 *  NR_CPUS+1..~0 - Region available for NAPI IDs
 */
 #define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
 static inline bool net_busy_loop_on(void)
 {
	return sysctl_net_busy_poll;
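
The point of the hunk above: MIN_NAPI_ID and its comment move in front of the
#ifdef CONFIG_NET_RX_BUSY_POLL block so the constant is defined even when busy
polling is compiled out, which per the pull summary fixes a build failure with
busypoll disabled. A hedged, generic sketch of that failure class (illustrative
only, not kernel source):

	/* If a constant lives only under a config option ... */
	#ifdef CONFIG_SOME_FEATURE
	#define MY_MIN_ID ((unsigned int)(NR_CPUS + 1))	/* hypothetical */
	#endif

	/* ... any user compiled unconditionally breaks the build when the
	 * option is off.  Hoisting the definition out of the #ifdef, as the
	 * hunk above does, resolves it.
	 */
	static inline bool my_id_valid(unsigned int id)
	{
		return id >= MY_MIN_ID;
	}
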
@@ -5499,6 +5499,21 @@ static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
	ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS);
 }
 
+/**
+ * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout
+ *
+ * Some device drivers do not offload AddBa/DelBa negotiation, but handle rx
+ * buffer reording internally, and therefore also handle the session timer.
+ *
+ * Trigger the timeout flow, which sends a DelBa.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+				   const u8 *addr, unsigned int tid);
+
 /* Rate control API */
 
 /**
@@ -366,12 +366,13 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb)
 static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
 {
-	int n, copy = len - off;
+	int n;
 
-	n = copy_to_iter(skb->data + off, copy, to);
-	if (n == copy)
+	n = copy_to_iter(skb->data + off, len, to);
+	if (n == len)
		return 0;
 
+	iov_iter_revert(to, n);
	return -EFAULT;
 }
 
@@ -204,10 +204,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
		fmt_cnt++;
	}
 
-	return __trace_printk(1/* fake ip will not be printed */, fmt,
-			      mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
-			      mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
-			      mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
+/* Horrid workaround for getting va_list handling working with different
+ * argument type combinations generically for 32 and 64 bit archs.
+ */
+#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
+#define __BPF_TP(...)						\
+	__trace_printk(1 /* Fake ip will not be printed. */,	\
+		       fmt, ##__VA_ARGS__)
+
+#define __BPF_ARG1_TP(...)						\
+	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
+	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
+	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
+	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
+	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
+
+#define __BPF_ARG2_TP(...)						\
+	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
+	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
+	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
+	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
+	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
+
+#define __BPF_ARG3_TP(...)						\
+	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
+	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
+	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
+	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
+	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
+
+	return __BPF_TP_EMIT();
 }
 
 static const struct bpf_func_proto bpf_trace_printk_proto = {
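
Why the macro ladder above is needed: on a 32-bit architecture a u64 vararg
occupies two argument slots, so the width of each argument actually pushed
must match what the format string consumes. A minimal userspace sketch of the
failure class (illustrative only, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t v = 0x100000002ULL;

		/* Correct: the conversion and the pushed width agree. */
		printf("%llu\n", (unsigned long long)v);

		/* By contrast, printf("%u\n", v) is undefined behavior: on a
		 * 32-bit ABI it would consume only one of the two slots that
		 * v occupies, skewing every later argument.  The
		 * __BPF_ARGn_TP() casts select the matching width instead of
		 * always pushing 64 bits.
		 */
		return 0;
	}
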
@@ -3505,6 +3505,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
 #else
+		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
		else
@@ -3520,6 +3521,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
		*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
 #else
+		*target_size = 4;
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
 #endif
		break;
@@ -201,10 +201,7 @@ void dccp_destroy_sock(struct sock *sk)
 {
	struct dccp_sock *dp = dccp_sk(sk);
 
-	/*
-	 * DCCP doesn't use sk_write_queue, just sk_send_head
-	 * for retransmissions
-	 */
+	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
@@ -42,6 +42,9 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
	padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
 
	if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
+		if (skb_put_padto(skb, skb->len + padlen))
+			return NULL;
+
		nskb = skb;
	} else {
		nskb = alloc_skb(NET_IP_ALIGN + skb->len +
@@ -56,13 +59,15 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
		skb_set_transport_header(nskb,
					 skb_transport_header(skb) - skb->head);
		skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
+
+		if (skb_put_padto(nskb, nskb->len + padlen)) {
+			kfree_skb(nskb);
+			return NULL;
+		}
+
		kfree_skb(skb);
	}
 
-	/* skb is freed when it fails */
-	if (skb_put_padto(nskb, nskb->len + padlen))
-		return NULL;
-
	tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
	tag[0] = 0;
	tag[1] = 1 << p->dp->index; /* destination port */
@@ -1083,15 +1083,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (!fi)
		goto failure;
-	fib_info_cnt++;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
-		if (!fi->fib_metrics)
-			goto failure;
+		if (unlikely(!fi->fib_metrics)) {
+			kfree(fi);
+			return ERR_PTR(err);
+		}
		atomic_set(&fi->fib_metrics->refcnt, 1);
-	} else
+	} else {
		fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
-
+	}
+	fib_info_cnt++;
	fi->fib_net = net;
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
@@ -2750,12 +2750,13 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
+		else
+			skb_dst_set(skb, &rt->dst);
	}
 
	if (err)
		goto errout_free;
 
-	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;
 
@@ -1722,6 +1722,8 @@ process:
		 */
		sock_hold(sk);
		refcounted = true;
+		if (tcp_filter(sk, skb))
+			goto discard_and_relse;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
@@ -1729,8 +1731,6 @@ process:
		}
		if (nsk == sk) {
			reqsk_put(req);
-		} else if (tcp_filter(sk, skb)) {
-			goto discard_and_relse;
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
@@ -122,14 +122,14 @@ int tcp_set_ulp(struct sock *sk, const char *name)
 
	ulp_ops = __tcp_ulp_find_autoload(name);
	if (!ulp_ops)
-		err = -ENOENT;
-	else
-		err = ulp_ops->init(sk);
+		return -ENOENT;
 
-	if (err)
-		goto out;
+	err = ulp_ops->init(sk);
+	if (err) {
+		module_put(ulp_ops->owner);
+		return err;
+	}
 
	icsk->icsk_ulp_ops = ulp_ops;
-out:
-	return err;
+	return 0;
 }
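
The shape of the tcp_set_ulp() fix above: __tcp_ulp_find_autoload() returns
with a reference held on the ULP's module, and the old code never dropped it
when ulp_ops->init() failed. The generic pairing, with hypothetical helper
names (a sketch, not code from the patch):

	/* Every exit taken after a successful lookup must undo the module
	 * reference the lookup took.
	 */
	ops = lookup_with_module_ref(name);	/* hypothetical helper */
	if (!ops)
		return -ENOENT;

	err = ops->init(sk);
	if (err) {
		module_put(ops->owner);		/* the put the old code missed */
		return err;
	}
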
@@ -417,14 +417,11 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;
 
-	if (dev != loopback_dev) {
-		if (idev && idev->dev == dev) {
-			struct inet6_dev *loopback_idev =
-				in6_dev_get(loopback_dev);
-			if (loopback_idev) {
-				rt->rt6i_idev = loopback_idev;
-				in6_dev_put(idev);
-			}
+	if (idev && idev->dev != loopback_dev) {
+		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
+		if (loopback_idev) {
+			rt->rt6i_idev = loopback_idev;
+			in6_dev_put(idev);
		}
	}
 }
@@ -3724,10 +3721,10 @@ static int ip6_route_dev_notify(struct notifier_block *this,
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
-		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
-		in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
+		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
 #endif
	}
 
@@ -1456,6 +1456,8 @@ process:
		}
		sock_hold(sk);
		refcounted = true;
+		if (tcp_filter(sk, skb))
+			goto discard_and_relse;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
@@ -1464,8 +1466,6 @@ process:
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
-		} else if (tcp_filter(sk, skb)) {
-			goto discard_and_relse;
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
|
|||
#define BROADCAST_ONE 1
|
||||
#define BROADCAST_REGISTERED 2
|
||||
#define BROADCAST_PROMISC_ONLY 4
|
||||
static int pfkey_broadcast(struct sk_buff *skb,
|
||||
static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
|
||||
int broadcast_flags, struct sock *one_sk,
|
||||
struct net *net)
|
||||
{
|
||||
|
@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
|
|||
rcu_read_unlock();
|
||||
|
||||
if (one_sk != NULL)
|
||||
err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
|
||||
err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
|
||||
|
||||
kfree_skb(skb2);
|
||||
kfree_skb(skb);
|
||||
|
@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
|
|||
hdr = (struct sadb_msg *) pfk->dump.skb->data;
|
||||
hdr->sadb_msg_seq = 0;
|
||||
hdr->sadb_msg_errno = rc;
|
||||
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
|
||||
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
|
||||
&pfk->sk, sock_net(&pfk->sk));
|
||||
pfk->dump.skb = NULL;
|
||||
}
|
||||
|
@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
|
|||
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
|
||||
sizeof(uint64_t));
|
||||
|
||||
pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
|
||||
pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1389,7 +1389,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
|
|||
|
||||
xfrm_state_put(x);
|
||||
|
||||
pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
|
||||
pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1476,7 +1476,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
|
|||
hdr->sadb_msg_seq = c->seq;
|
||||
hdr->sadb_msg_pid = c->portid;
|
||||
|
||||
pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
|
||||
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1589,7 +1589,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
|
|||
out_hdr->sadb_msg_reserved = 0;
|
||||
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
|
||||
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
|
||||
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
|
||||
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1694,8 +1694,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
|
|||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
|
||||
|
||||
pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
|
||||
sock_net(sk));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1712,7 +1712,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
|
|||
hdr->sadb_msg_errno = (uint8_t) 0;
|
||||
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
|
||||
|
||||
return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
|
||||
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
|
||||
sock_net(sk));
|
||||
}
|
||||
|
||||
static int key_notify_sa_flush(const struct km_event *c)
|
||||
|
@ -1733,7 +1734,7 @@ static int key_notify_sa_flush(const struct km_event *c)
|
|||
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
|
||||
hdr->sadb_msg_reserved = 0;
|
||||
|
||||
pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
|
||||
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1790,7 +1791,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
|
|||
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
|
||||
|
||||
if (pfk->dump.skb)
|
||||
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
|
||||
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
|
||||
&pfk->sk, sock_net(&pfk->sk));
|
||||
pfk->dump.skb = out_skb;
|
||||
|
||||
|
@ -1878,7 +1879,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
|
|||
new_hdr->sadb_msg_errno = 0;
|
||||
}
|
||||
|
||||
pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
|
||||
pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2206,7 +2207,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
|
|||
out_hdr->sadb_msg_errno = 0;
|
||||
out_hdr->sadb_msg_seq = c->seq;
|
||||
out_hdr->sadb_msg_pid = c->portid;
|
||||
pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
|
||||
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
@ -2426,7 +2427,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
|
|||
out_hdr->sadb_msg_errno = 0;
|
||||
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
|
||||
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
|
||||
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
|
||||
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
|
@ -2682,7 +2683,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
|
|||
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
|
||||
|
||||
if (pfk->dump.skb)
|
||||
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
|
||||
pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
|
||||
&pfk->sk, sock_net(&pfk->sk));
|
||||
pfk->dump.skb = out_skb;
|
||||
|
||||
|
@@ -2739,7 +2740,7 @@ static int key_notify_policy_flush(const struct km_event *c)
 	hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 	hdr->sadb_msg_reserved = 0;
-	pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
+	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 	return 0;
 
 }
@@ -2803,7 +2804,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
 	void *ext_hdrs[SADB_EXT_MAX];
 	int err;
 
-	pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
+	pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
 			BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
 
 	memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -3024,7 +3025,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
 	out_hdr->sadb_msg_seq = 0;
 	out_hdr->sadb_msg_pid = 0;
 
-	pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+			xs_net(x));
 	return 0;
 }
 
@@ -3212,7 +3214,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
 					     xfrm_ctx->ctx_len);
 	}
 
-	return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+			       xs_net(x));
 }
 
 static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3408,7 +3411,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
 	n_port->sadb_x_nat_t_port_port = sport;
 	n_port->sadb_x_nat_t_port_reserved = 0;
 
-	return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+			       xs_net(x));
 }
 
 #ifdef CONFIG_NET_KEY_MIGRATE
@@ -3599,7 +3603,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 	}
 
 	/* broadcast migrate message to sockets */
-	pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
+	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
 
 	return 0;
 
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
@@ -7,7 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -466,3 +466,23 @@ void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif,
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl);
+
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+				   const u8 *addr, unsigned int tid)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+
+	rcu_read_lock();
+	sta = sta_info_get_bss(sdata, addr);
+	if (!sta)
+		goto unlock;
+
+	set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ unlock:
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_rx_ba_timer_expired);
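The second agg-rx.c hunk exports a companion to ieee80211_manage_rx_ba_offl() for drivers that offload block-ack session handling: when firmware reports that an RX BA session's inactivity timer expired, the driver calls in, and mac80211 marks the TID and defers the actual teardown to its aggregation work item instead of doing it in the report path. A hypothetical driver call site, where the function name and the TID value are invented:

/* Invented driver snippet: firmware said the RX reorder timer for TID 5
 * on this peer timed out; hand the session cleanup to mac80211. */
static void drv_report_ba_timeout(struct ieee80211_vif *vif,
                                  const u8 *peer_addr)
{
        ieee80211_rx_ba_timer_expired(vif, peer_addr, 5);
}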
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
@@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 void qdisc_hash_add(struct Qdisc *q, bool invisible)
 {
 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-		struct Qdisc *root = qdisc_dev(q)->qdisc;
-
-		WARN_ON_ONCE(root == &noop_qdisc);
 		ASSERT_RTNL();
 		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
 		if (invisible)
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
@@ -572,8 +572,10 @@ static void atm_tc_destroy(struct Qdisc *sch)
 	struct atm_flow_data *flow, *tmp;
 
 	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
-	list_for_each_entry(flow, &p->flows, list)
+	list_for_each_entry(flow, &p->flows, list) {
 		tcf_block_put(flow->block);
+		flow->block = NULL;
+	}
 
 	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
 		if (flow->ref > 1)
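This hunk and the cbq, hfsc and htb ones below are the same fix: classful qdisc destructors now release every class's tcf filter block in a first pass and clear the pointer, so nothing can reach a freed block while the classes themselves are destroyed in a second pass. A toy user-space rendition of the two-pass idiom, with all names invented:

#include <stdio.h>
#include <stdlib.h>

struct blk { int refs; };
struct cls { struct blk *block; struct cls *next; };

static void block_put(struct blk *b)
{
        if (b && --b->refs == 0)
                free(b);
}

static void destroy_all(struct cls *head)
{
        struct cls *c, *next;

        /* Pass 1: drop each filter block and NULL the pointer so no later
         * walk can put (or dereference) it a second time. */
        for (c = head; c; c = c->next) {
                block_put(c->block);
                c->block = NULL;
        }
        /* Pass 2: only now free the classes themselves. */
        for (c = head; c; c = next) {
                next = c->next;
                free(c);
        }
}

int main(void)
{
        struct cls *c = calloc(1, sizeof(*c));

        c->block = calloc(1, sizeof(*c->block));
        c->block->refs = 1;
        destroy_all(c);
        puts("torn down cleanly");
        return 0;
}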
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
@@ -1431,8 +1431,10 @@ static void cbq_destroy(struct Qdisc *sch)
 	 *   be bound to classes which have been destroyed already. --TGR '04
 	 */
 	for (h = 0; h < q->clhash.hashsize; h++) {
-		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
 			tcf_block_put(cl->block);
+			cl->block = NULL;
+		}
 	}
 	for (h = 0; h < q->clhash.hashsize; h++) {
 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
@@ -1428,6 +1428,10 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 		return err;
 	q->eligible = RB_ROOT;
 
+	err = tcf_block_get(&q->root.block, &q->root.filter_list);
+	if (err)
+		goto err_tcf;
+
 	q->root.cl_common.classid = sch->handle;
 	q->root.refcnt  = 1;
 	q->root.sched   = q;
@@ -1447,6 +1451,10 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 	qdisc_watchdog_init(&q->watchdog, sch);
 
 	return 0;
+
+err_tcf:
+	qdisc_class_hash_destroy(&q->clhash);
+	return err;
 }
 
 static int
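hfsc_init_qdisc() now fetches a tcf block for the root class and, if that fails after the class hash has been initialised, unwinds through the new err_tcf label instead of leaking the hash. This is the standard kernel goto-unwind shape; a self-contained sketch with stand-in helpers (all invented) that forces the error path:

#include <errno.h>
#include <stdio.h>

static int acquire_hash(void)  { return 0; }        /* qdisc class hash stand-in */
static int acquire_block(void) { return -ENOMEM; }  /* tcf_block_get() stand-in, made to fail */
static void release_hash(void) { puts("hash destroyed on unwind"); }

static int init_example(void)
{
        int err;

        err = acquire_hash();
        if (err)
                return err;

        err = acquire_block();
        if (err)
                goto err_block;         /* mirrors the new err_tcf label */

        return 0;

err_block:
        release_hash();                 /* undo only what already succeeded */
        return err;
}

int main(void)
{
        printf("init_example() = %d\n", init_example());
        return 0;
}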
@@ -1522,8 +1530,10 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
 	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
 			tcf_block_put(cl->block);
+			cl->block = NULL;
+		}
 	}
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
@@ -1258,8 +1258,10 @@ static void htb_destroy(struct Qdisc *sch)
 	tcf_block_put(q->block);
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			tcf_block_put(cl->block);
+			cl->block = NULL;
+		}
 	}
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
@@ -437,6 +437,7 @@ congestion_drop:
 		qdisc_drop(head, sch, to_free);
 
 		slot_queue_add(slot, skb);
+		qdisc_tree_reduce_backlog(sch, 0, delta);
 		return NET_XMIT_CN;
 	}
 
|
@ -468,8 +469,10 @@ enqueue:
|
|||
/* Return Congestion Notification only if we dropped a packet
|
||||
* from this flow.
|
||||
*/
|
||||
if (qlen != slot->qlen)
|
||||
if (qlen != slot->qlen) {
|
||||
qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
|
||||
return NET_XMIT_CN;
|
||||
}
|
||||
|
||||
/* As we dropped a packet, better let upper stack know this */
|
||||
qdisc_tree_reduce_backlog(sch, 1, dropped);
|
||||
|
|
|
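Both sfq hunks close the same accounting hole: when sfq drops some other queued packet to make room for the one being enqueued, the parent qdiscs must still be told how many packets and bytes left the subtree, otherwise hierarchical backlog counters drift. qdisc_tree_reduce_backlog(sch, n, len) propagates a decrease of n packets and len bytes up the tree; in the congestion path one packet goes out and one comes in, so the packet delta is 0 and only the byte difference is reported. A worked example of that arithmetic, with invented sizes:

#include <stdio.h>

int main(void)
{
        /* Invented sizes: the dropped head of the longest slot is 1400
         * bytes, the newly admitted skb is 300 bytes. One packet out,
         * one in: the subtree shrinks by bytes only. */
        unsigned int head_len = 1400, skb_len = 300;
        unsigned int delta = head_len - skb_len;

        /* Corresponds to qdisc_tree_reduce_backlog(sch, 0, delta). */
        printf("packet delta: 0, byte delta: %u\n", delta);
        return 0;
}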
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
@@ -596,7 +596,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
 	rcu_read_lock();
 	b = rcu_dereference_rtnl(dev->tipc_ptr);
 	if (likely(b && test_bit(0, &b->up) &&
-		   (skb->pkt_type <= PACKET_BROADCAST))) {
+		   (skb->pkt_type <= PACKET_MULTICAST))) {
 		skb->next = NULL;
 		tipc_rcv(dev_net(dev), skb, b);
 		rcu_read_unlock();
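The bearer check leans on the numeric order of the pkt_type constants in include/uapi/linux/if_packet.h: PACKET_HOST is 0, PACKET_BROADCAST is 1 and PACKET_MULTICAST is 2, so the old `<= PACKET_BROADCAST` bound silently dropped frames delivered as link-layer multicast. Raising the bound to PACKET_MULTICAST accepts host, broadcast and multicast frames while still rejecting PACKET_OTHERHOST and above. A standalone check of the two bounds:

#include <stdio.h>

/* Values as defined in include/uapi/linux/if_packet.h. */
enum { PACKET_HOST, PACKET_BROADCAST, PACKET_MULTICAST, PACKET_OTHERHOST };

int main(void)
{
        for (int t = PACKET_HOST; t <= PACKET_OTHERHOST; t++)
                printf("pkt_type %d: old %s, new %s\n", t,
                       t <= PACKET_BROADCAST ? "accept" : "drop",
                       t <= PACKET_MULTICAST ? "accept" : "drop");
        return 0;
}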
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
@@ -513,6 +513,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
 
 	/* Now reverse the concerned fields */
 	msg_set_errcode(hdr, err);
+	msg_set_non_seq(hdr, 0);
 	msg_set_origport(hdr, msg_destport(&ohdr));
 	msg_set_destport(hdr, msg_origport(&ohdr));
 	msg_set_destnode(hdr, msg_prevnode(&ohdr));
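tipc_msg_reverse() rebuilds a received header in place so the buffer can be bounced back to its origin. The added msg_set_non_seq(hdr, 0) clears a flag the rejected message may have inherited from the non-sequenced broadcast path; the reading here, an inference from the change rather than anything stated in this diff, is that a returned message travels as ordinary unicast and must not keep claiming to be non-sequenced. A toy illustration of clearing an inherited flag while reversing, using an invented one-word header:

#include <stdint.h>
#include <stdio.h>

#define HDR_NON_SEQ 0x1u        /* invented stand-in for the TIPC flag */

static uint32_t reverse_for_return(uint32_t hdr_word)
{
        /* ... origin/destination fields would be swapped here ... */
        return hdr_word & ~HDR_NON_SEQ;  /* returned copy is unicast */
}

int main(void)
{
        printf("0x%x\n", reverse_for_return(HDR_NON_SEQ));  /* prints 0x0 */
        return 0;
}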