Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix refcounting wrt timers which hold onto inet6 address objects,
    from Xin Long.

 2) Fix an ancient bug in wireless wext ioctls, from Johannes Berg.

 3) Firmware handling fixes in brcm80211 driver, from Arend Van Spriel.

 4) Several mlx5 driver fixes (firmware readiness, timestamp capability
    reporting, devlink command validity checking, tc offloading, etc.)
    from Eli Cohen, Maor Dickman, Chris Mi, and Or Gerlitz.

 5) Fix dst leak in IP/IP6 tunnels, from Haishuang Yan.

 6) Fix dst refcount bug in decnet, from Wei Wang.

 7) Netdev can be double freed in register_vlan_device(). Fix from
    Gao Feng.

 8) Don't allow object to be destroyed while it is being dumped in
    SCTP, from Xin Long.

 9) Fix dpaa_eth build when modular, from Madalin Bucur.

10) Fix throw route leaks, from Serhey Popovych.

11) IFLA_GROUP missing from if_nlmsg_size() and ifla_policy[] table,
    also from Serhey Popovych.

12) Fix premature TX SKB free in stmmac, from Niklas Cassel.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (36 commits)
  igmp: add a missing spin_lock_init()
  net: stmmac: free an skb first when there are no longer any descriptors using it
  sfc: remove duplicate up_write on VF filter_sem
  rtnetlink: add IFLA_GROUP to ifla_policy
  ipv6: Do not leak throw route references
  dt-bindings: net: sms911x: Add missing optional VDD regulators
  dpaa_eth: reuse the dma_ops provided by the FMan MAC device
  fsl/fman: propagate dma_ops
  net/core: remove explicit do_softirq() from busy_poll_stop()
  fib_rules: Resolve goto rules target on delete
  sctp: ensure ep is not destroyed before doing the dump
  net/hns:bugfix of ethtool -t phy self_test
  net: 8021q: Fix one possible panic caused by BUG_ON in free_netdev
  cxgb4: notify uP to route ctrlq compl to rdma rspq
  ip6_tunnel: Correct tos value in collect_md mode
  decnet: always not take dst->__refcnt when inserting dst into hash table
  ip6_tunnel: fix potential issue in __ip6_tnl_rcv
  ip_tunnel: fix potential issue in ip_tunnel_rcv
  brcmfmac: fix uninitialized warning in brcmf_usb_probe_phase2()
  net/mlx5e: Avoid doing a cleanup call if the profile doesn't have it
  ...
commit 48b6bbef9a
@@ -34,7 +34,7 @@ Required properties:
       "brcm,bcm6328-switch"
       "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"

-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.

 Examples:

@@ -27,6 +27,7 @@ Optional properties:
   of the device. On many systems this is wired high so the device goes
   out of reset at power-on, but if it is under program control, this
   optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies

 Examples:

@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
 {
     int err;

+    mutex_lock(&uld_mutex);
     err = setup_sge_queues(adap);
     if (err)
-        goto out;
+        goto rel_lock;
     err = setup_rss(adap);
     if (err)
         goto freeq;

@@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap)
             goto irq_err;
     }

-    mutex_lock(&uld_mutex);
     enable_rx(adap);
     t4_sge_start(adap);
     t4_intr_enable(adap);

@@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap)
 #endif
     /* Initialize hash mac addr list*/
     INIT_LIST_HEAD(&adap->mac_hlist);
- out:
     return err;
+
 irq_err:
     dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
     t4_free_sge_resources(adap);
-    goto out;
+ rel_lock:
+    mutex_unlock(&uld_mutex);
+    return err;
 }

 static void cxgb_down(struct adapter *adapter)

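Note on the cxgb_up() hunks: the fix widens the uld_mutex critical section to cover queue setup, so every failure taken after the lock must leave through a label that releases it exactly once. A minimal sketch of this lock-held goto-unwind pattern, with hypothetical setup_a()/setup_b()/teardown_a() helpers standing in for the driver's real steps:

    static DEFINE_MUTEX(res_mutex);

    static int bring_up(struct adapter *adap)
    {
        int err;

        mutex_lock(&res_mutex);
        err = setup_a(adap);        /* hypothetical step */
        if (err)
            goto rel_lock;          /* nothing else to undo yet */
        err = setup_b(adap);        /* hypothetical step */
        if (err)
            goto free_a;

        mutex_unlock(&res_mutex);
        return 0;

    free_a:
        teardown_a(adap);           /* undo in reverse order */
    rel_lock:
        mutex_unlock(&res_mutex);   /* every exit drops the lock once */
        return err;
    }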
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
     priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */

     /* device used for DMA mapping */
-    arch_setup_dma_ops(dev, 0, 0, NULL, false);
+    set_dma_ops(dev, get_dma_ops(&pdev->dev));
     err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
     if (err) {
         dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");

@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
         goto no_mem;
     }

+    set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
+
     ret = platform_device_add_data(pdev, &data, sizeof(data));
     if (ret)
         goto err;

@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)

         /* Force 1000M Link, Default is 0x0200 */
         phy_write(phy_dev, 7, 0x20C);
         phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
-
-        /* Enable PHY loop-back */
+        /* Powerup Fiber */
+        phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+        val = phy_read(phy_dev, COPPER_CONTROL_REG);
+        val &= ~PHY_POWER_DOWN;
+        phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
+        /* Enable Phy Loopback */
+        phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
         val = phy_read(phy_dev, COPPER_CONTROL_REG);
         val |= PHY_LOOP_BACK;
         val &= ~PHY_POWER_DOWN;

@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
         phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
         phy_write(phy_dev, 1, 0x400);
         phy_write(phy_dev, 7, 0x200);
+
+        phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+        val = phy_read(phy_dev, COPPER_CONTROL_REG);
+        val |= PHY_POWER_DOWN;
+        phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
         phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
         phy_write(phy_dev, 9, 0xF00);

@@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
                 SOF_TIMESTAMPING_RX_HARDWARE |
                 SOF_TIMESTAMPING_RAW_HARDWARE;

-    info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
-             (BIT(1) << HWTSTAMP_TX_ON);
+    info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+             BIT(HWTSTAMP_TX_ON);

-    info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
-               (BIT(1) << HWTSTAMP_FILTER_ALL);
+    info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+               BIT(HWTSTAMP_FILTER_ALL);

     return 0;
 }

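Note on the timestamping hunk: BIT(1) << n is 2 << n, i.e. bit n+1, so the old code advertised HWTSTAMP_TX_OFF (value 0) as bit 1 and never set bit 0 at all; the same shift error doubled every rx_filters bit. BIT(n) expands to 1UL << n, which is what these capability masks expect. A tiny userspace check of the arithmetic, using the UAPI values HWTSTAMP_TX_OFF=0 and HWTSTAMP_TX_ON=1:

    #include <assert.h>

    #define BIT(nr) (1UL << (nr))

    int main(void)
    {
        assert((BIT(1) << 0) == 0x2);   /* old code: wrongly flags bit 1 */
        assert((BIT(1) << 1) == 0x4);   /* old code: wrongly flags bit 2 */
        assert((BIT(0) | BIT(1)) == 0x3); /* fixed code: bits 0 and 1 */
        return 0;
    }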
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
     return netdev;

 err_cleanup_nic:
-    profile->cleanup(priv);
+    if (profile->cleanup)
+        profile->cleanup(priv);
     free_netdev(netdev);

     return NULL;

@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
     params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
     params->num_tc = 1;
+    params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
     mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }

 static void mlx5e_build_rep_netdev(struct net_device *netdev)

@@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = {
     {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
     {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},

-    {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
     {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
     {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
     {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},

@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
     return 0;
 }

-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 {
-    struct mlx5_core_dev *dev;
-    u16 cur_mlx5_mode, mlx5_mode = 0;
+    struct mlx5_core_dev *dev = devlink_priv(devlink);

-    dev = devlink_priv(devlink);
+    if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+        return -EOPNOTSUPP;

     if (!MLX5_CAP_GEN(dev, vport_group_manager))
         return -EOPNOTSUPP;

-    cur_mlx5_mode = dev->priv.eswitch->mode;
-
-    if (cur_mlx5_mode == SRIOV_NONE)
+    if (dev->priv.eswitch->mode == SRIOV_NONE)
         return -EOPNOTSUPP;

+    return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+    struct mlx5_core_dev *dev = devlink_priv(devlink);
+    u16 cur_mlx5_mode, mlx5_mode = 0;
+    int err;
+
+    err = mlx5_devlink_eswitch_check(devlink);
+    if (err)
+        return err;
+
+    cur_mlx5_mode = dev->priv.eswitch->mode;
+
     if (esw_mode_from_devlink(mode, &mlx5_mode))
         return -EINVAL;

@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)

 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
-    struct mlx5_core_dev *dev;
+    struct mlx5_core_dev *dev = devlink_priv(devlink);
+    int err;

-    dev = devlink_priv(devlink);
-
-    if (!MLX5_CAP_GEN(dev, vport_group_manager))
-        return -EOPNOTSUPP;
-
-    if (dev->priv.eswitch->mode == SRIOV_NONE)
-        return -EOPNOTSUPP;
+    err = mlx5_devlink_eswitch_check(devlink);
+    if (err)
+        return err;

     return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }

@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 {
     struct mlx5_core_dev *dev = devlink_priv(devlink);
     struct mlx5_eswitch *esw = dev->priv.eswitch;
-    int num_vports = esw->enabled_vports;
     int err, vport;
     u8 mlx5_mode;

-    if (!MLX5_CAP_GEN(dev, vport_group_manager))
-        return -EOPNOTSUPP;
-
-    if (esw->mode == SRIOV_NONE)
-        return -EOPNOTSUPP;
+    err = mlx5_devlink_eswitch_check(devlink);
+    if (err)
+        return err;

     switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
     case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:

@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
     if (err)
         goto out;

-    for (vport = 1; vport < num_vports; vport++) {
+    for (vport = 1; vport < esw->enabled_vports; vport++) {
         err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
         if (err) {
             esw_warn(dev, "Failed to set min inline on vport %d\n",

@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 {
     struct mlx5_core_dev *dev = devlink_priv(devlink);
     struct mlx5_eswitch *esw = dev->priv.eswitch;
+    int err;

-    if (!MLX5_CAP_GEN(dev, vport_group_manager))
-        return -EOPNOTSUPP;
-
-    if (esw->mode == SRIOV_NONE)
-        return -EOPNOTSUPP;
+    err = mlx5_devlink_eswitch_check(devlink);
+    if (err)
+        return err;

     return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }

@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
     struct mlx5_eswitch *esw = dev->priv.eswitch;
     int err;

-    if (!MLX5_CAP_GEN(dev, vport_group_manager))
-        return -EOPNOTSUPP;
-
-    if (esw->mode == SRIOV_NONE)
-        return -EOPNOTSUPP;
+    err = mlx5_devlink_eswitch_check(devlink);
+    if (err)
+        return err;

     if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
         (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||

@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 {
     struct mlx5_core_dev *dev = devlink_priv(devlink);
     struct mlx5_eswitch *esw = dev->priv.eswitch;
+    int err;

-    if (!MLX5_CAP_GEN(dev, vport_group_manager))
-        return -EOPNOTSUPP;
-
-    if (esw->mode == SRIOV_NONE)
-        return -EOPNOTSUPP;
+    err = mlx5_devlink_eswitch_check(devlink);
+    if (err)
+        return err;

     *encap = esw->offloads.encap;
     return 0;

@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
     },
 };

-#define FW_INIT_TIMEOUT_MILI        2000
-#define FW_INIT_WAIT_MS             2
+#define FW_INIT_TIMEOUT_MILI        2000
+#define FW_INIT_WAIT_MS             2
+#define FW_PRE_INIT_TIMEOUT_MILI    10000

 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
 {

@@ -1013,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
      */
     dev->state = MLX5_DEVICE_STATE_UP;

+    /* wait for firmware to accept initialization segments configurations
+     */
+    err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+    if (err) {
+        dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+            FW_PRE_INIT_TIMEOUT_MILI);
+        goto out;
+    }
+
     err = mlx5_cmd_init(dev);
     if (err) {
         dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");

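Note on the FW_PRE_INIT_TIMEOUT_MILI hunk: before touching the command interface, the driver now gives firmware up to 10 seconds to leave its pre-initializing state. A hedged sketch of such a bounded poll loop (simplified; device_is_ready() is a hypothetical stand-in for the real initializing-bit read):

    #include <linux/jiffies.h>
    #include <linux/delay.h>

    /* Sketch: poll a ready predicate until it holds or the deadline passes. */
    static int wait_until_ready(struct mlx5_core_dev *dev, u32 max_wait_ms)
    {
        unsigned long end = jiffies + msecs_to_jiffies(max_wait_ms);

        while (!device_is_ready(dev)) {   /* hypothetical predicate */
            if (time_after(jiffies, end))
                return -EBUSY;
            msleep(FW_INIT_WAIT_MS);      /* back off between reads */
        }
        return 0;
    }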
@@ -661,8 +661,6 @@ restore_filters:
     up_write(&vf->efx->filter_sem);
     mutex_unlock(&vf->efx->mac_lock);

-    up_write(&vf->efx->filter_sem);
-
     rc2 = efx_net_open(vf->efx->net_dev);
     if (rc2)
         goto reset_nic;

@@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)

     tx_q->tx_skbuff_dma[first_entry].buf = des;
     tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
-    tx_q->tx_skbuff[first_entry] = skb;

     first->des0 = cpu_to_le32(des);

@@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)

     tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

+    /* Only the last descriptor gets to point to the skb. */
+    tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+
+    /* We've used all descriptors we need for this skb, however,
+     * advance cur_tx so that it references a fresh descriptor.
+     * ndo_start_xmit will fill this descriptor the next time it's
+     * called and stmmac_tx_clean may clean up to this descriptor.
+     */
     tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);

     if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {

@@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

     first = desc;

-    tx_q->tx_skbuff[first_entry] = skb;
-
     enh_desc = priv->plat->enh_desc;
     /* To program the descriptors according to the size of the frame */
     if (enh_desc)

@@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
               skb->len);
     }

-    entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+    /* Only the last descriptor gets to point to the skb. */
+    tx_q->tx_skbuff[entry] = skb;

+    /* We've used all descriptors we need for this skb, however,
+     * advance cur_tx so that it references a fresh descriptor.
+     * ndo_start_xmit will fill this descriptor the next time it's
+     * called and stmmac_tx_clean may clean up to this descriptor.
+     */
+    entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
     tx_q->cur_tx = entry;

     if (netif_msg_pktdata(priv)) {

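Note on the stmmac hunks: tx_q->tx_skbuff[] is what stmmac_tx_clean() consults to free an skb, so publishing the pointer on the first descriptor let the cleaner free the skb while later descriptors of the same frame still pointed into its data. The fix assigns the skb to the last descriptor only, after all fragments are mapped. A condensed sketch of the ordering (fill_desc()/fill_frag_desc()/next_entry() are hypothetical helpers, not the driver's real ones):

    /* Sketch: fill all descriptors first, publish the skb last. */
    fill_desc(ring, first_entry, skb_headlen(skb));
    for (i = 0; i < nfrags; i++)
        entry = fill_frag_desc(ring, skb, i);

    /* Only now may the cleaner see (and eventually free) the skb. */
    ring->tx_skbuff[entry] = skb;
    ring->cur_tx = next_entry(entry);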
@@ -442,7 +442,7 @@ struct brcmf_fw {
     const char *nvram_name;
     u16 domain_nr;
     u16 bus_nr;
-    void (*done)(struct device *dev, const struct firmware *fw,
+    void (*done)(struct device *dev, int err, const struct firmware *fw,
              void *nvram_image, u32 nvram_len);
 };

@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
     if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
         goto fail;

-    fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+    fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
     kfree(fwctx);
     return;

 fail:
     brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
     release_firmware(fwctx->code);
     device_release_driver(fwctx->dev);
+    fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
     kfree(fwctx);
 }

 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
 {
     struct brcmf_fw *fwctx = ctx;
-    int ret;
+    int ret = 0;

     brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-    if (!fw)
+    if (!fw) {
+        ret = -ENOENT;
         goto fail;
-
-    /* only requested code so done here */
-    if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
-        fwctx->done(fwctx->dev, fw, NULL, 0);
-        kfree(fwctx);
-        return;
     }
+    /* only requested code so done here */
+    if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+        goto done;

     fwctx->code = fw;
     ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
                       fwctx->dev, GFP_KERNEL, fwctx,
                       brcmf_fw_request_nvram_done);

-    if (!ret)
-        return;
-
-    brcmf_fw_request_nvram_done(NULL, fwctx);
+    /* pass NULL to nvram callback for bcm47xx fallback */
+    if (ret)
+        brcmf_fw_request_nvram_done(NULL, fwctx);
     return;

 fail:
     brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
     device_release_driver(fwctx->dev);
+done:
+    fwctx->done(fwctx->dev, ret, fw, NULL, 0);
     kfree(fwctx);
 }

 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
                 const char *code, const char *nvram,
-                void (*fw_cb)(struct device *dev,
+                void (*fw_cb)(struct device *dev, int err,
                           const struct firmware *fw,
                           void *nvram_image, u32 nvram_len),
                 u16 domain_nr, u16 bus_nr)

@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,

 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
                const char *code, const char *nvram,
-               void (*fw_cb)(struct device *dev,
+               void (*fw_cb)(struct device *dev, int err,
                      const struct firmware *fw,
                      void *nvram_image, u32 nvram_len))
 {

@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
  */
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
                 const char *code, const char *nvram,
-                void (*fw_cb)(struct device *dev,
+                void (*fw_cb)(struct device *dev, int err,
                           const struct firmware *fw,
                           void *nvram_image, u32 nvram_len),
                 u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
                const char *code, const char *nvram,
-               void (*fw_cb)(struct device *dev,
+               void (*fw_cb)(struct device *dev, int err,
                      const struct firmware *fw,
                      void *nvram_image, u32 nvram_len));

@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
     struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
     struct brcmf_fws_mac_descriptor *entry;

-    if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
+    if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
         return;

     entry = &fws->desc.iface[ifp->ifidx];

@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
     .write32 = brcmf_pcie_buscore_write32,
 };

-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+                 const struct firmware *fw,
                  void *nvram, u32 nvram_len)
 {
-    struct brcmf_bus *bus = dev_get_drvdata(dev);
-    struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
-    struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+    struct brcmf_bus *bus;
+    struct brcmf_pciedev *pcie_bus_dev;
+    struct brcmf_pciedev_info *devinfo;
     struct brcmf_commonring **flowrings;
-    int ret;
     u32 i;

+    /* check firmware loading result */
+    if (ret)
+        goto fail;
+
+    bus = dev_get_drvdata(dev);
+    pcie_bus_dev = bus->bus_priv.pcie;
+    devinfo = pcie_bus_dev->devinfo;
     brcmf_pcie_attach(devinfo);

     /* Some of the firmwares have the size of the memory of the device

@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
     .get_memdump = brcmf_sdio_bus_get_memdump,
 };

-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
                      const struct firmware *code,
                      void *nvram, u32 nvram_len)
 {
-    struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-    struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-    struct brcmf_sdio *bus = sdiodev->bus;
-    int err = 0;
+    struct brcmf_bus *bus_if;
+    struct brcmf_sdio_dev *sdiodev;
+    struct brcmf_sdio *bus;
     u8 saveclk;

-    brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+    brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+    bus_if = dev_get_drvdata(dev);
+    sdiodev = bus_if->bus_priv.sdio;
+    if (err)
+        goto fail;

     if (!bus_if->drvr)
         return;

+    bus = sdiodev->bus;
+
     /* try to download image and nvram to the dongle */
     bus->alp_only = true;
     err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);

@@ -4083,6 +4088,7 @@ release:
 fail:
     brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
     device_release_driver(dev);
+    device_release_driver(&sdiodev->func[2]->dev);
 }

 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)

@@ -1159,17 +1159,18 @@ fail:
     return ret;
 }

-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
                    const struct firmware *fw,
                    void *nvram, u32 nvlen)
 {
     struct brcmf_bus *bus = dev_get_drvdata(dev);
-    struct brcmf_usbdev_info *devinfo;
-    int ret;
+    struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;

+    if (ret)
+        goto error;
+
     brcmf_dbg(USB, "Start fw downloading\n");

-    devinfo = bus->bus_priv.usb->devinfo;
     ret = check_file(fw->data);
     if (ret < 0) {
         brcmf_err("invalid firmware\n");

@@ -6,7 +6,7 @@
 struct net;

 #ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
               void __user *arg);
 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
                  unsigned long arg);

@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 struct iw_statistics *get_wireless_stats(struct net_device *dev);
 int call_commit_handler(struct net_device *dev);
 #else
-static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
                     void __user *arg)
 {
     return -EINVAL;

@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
     return 0;

 out_free_newdev:
-    free_netdev(new_dev);
+    if (new_dev->reg_state == NETREG_UNINITIALIZED)
+        free_netdev(new_dev);
     return err;
 }

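Note on the register_vlan_device() hunk: once register_netdevice() succeeds, the core owns the device and releases it through the unregister path, so an unconditional free_netdev() on the error path could free it a second time and trip the BUG_ON in free_netdev(). Guarding on reg_state keeps the manual free for the never-registered case only; roughly:

    err = register_netdevice(new_dev);
    if (err < 0)
        goto out_free_newdev;
    /* ... later failures arrive here after unregister ... */

    out_free_newdev:
        /* Free manually only if the core never took ownership. */
        if (new_dev->reg_state == NETREG_UNINITIALIZED)
            free_netdev(new_dev);
        return err;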
@@ -5206,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
     if (rc == BUSY_POLL_BUDGET)
         __napi_schedule(napi);
     local_bh_enable();
-    if (local_softirq_pending())
-        do_softirq();
 }

 void napi_busy_loop(unsigned int napi_id,

@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
     if (cmd == SIOCGIFNAME)
         return dev_ifname(net, (struct ifreq __user *)arg);

+    /*
+     * Take care of Wireless Extensions. Unfortunately struct iwreq
+     * isn't a proper subset of struct ifreq (it's 8 byte shorter)
+     * so we need to treat it specially, otherwise applications may
+     * fault if the struct they're passing happens to land at the
+     * end of a mapped page.
+     */
+    if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+        struct iwreq iwr;
+
+        if (copy_from_user(&iwr, arg, sizeof(iwr)))
+            return -EFAULT;
+
+        return wext_handle_ioctl(net, &iwr, cmd, arg);
+    }
+
     if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
         return -EFAULT;

@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                 ret = -EFAULT;
             return ret;
         }
-        /* Take care of Wireless Extensions */
-        if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-            return wext_handle_ioctl(net, &ifr, cmd, arg);
         return -ENOTTY;
     }
 }

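Note on the dev_ioctl() hunks: the comment is the whole story — struct iwreq is 8 bytes shorter than struct ifreq, so copying sizeof(struct ifreq) for a wireless ioctl can read past the end of the user's buffer and fault at a page boundary; wireless commands now copy exactly sizeof(struct iwreq). A userspace illustration of the size gap (the printed values assume a common 64-bit configuration, where ifreq is 40 bytes and iwreq is 32):

    #include <stdio.h>
    #include <net/if.h>             /* struct ifreq */
    #include <linux/wireless.h>     /* struct iwreq */

    int main(void)
    {
        printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));
        printf("sizeof(struct iwreq) = %zu\n", sizeof(struct iwreq));
        return 0;
    }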
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
     struct net *net = sock_net(skb->sk);
     struct fib_rule_hdr *frh = nlmsg_data(nlh);
     struct fib_rules_ops *ops = NULL;
-    struct fib_rule *rule, *tmp;
+    struct fib_rule *rule, *r;
     struct nlattr *tb[FRA_MAX+1];
     struct fib_kuid_range range;
     int err = -EINVAL;

@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,

         /*
          * Check if this rule is a target to any of them. If so,
+         * adjust to the next one with the same preference or
          * disable them. As this operation is eventually very
-         * expensive, it is only performed if goto rules have
-         * actually been added.
+         * expensive, it is only performed if goto rules, except
+         * current if it is goto rule, have actually been added.
          */
         if (ops->nr_goto_rules > 0) {
-            list_for_each_entry(tmp, &ops->rules_list, list) {
-                if (rtnl_dereference(tmp->ctarget) == rule) {
-                    RCU_INIT_POINTER(tmp->ctarget, NULL);
+            struct fib_rule *n;
+
+            n = list_next_entry(rule, list);
+            if (&n->list == &ops->rules_list || n->pref != rule->pref)
+                n = NULL;
+            list_for_each_entry(r, &ops->rules_list, list) {
+                if (rtnl_dereference(r->ctarget) != rule)
+                    continue;
+                rcu_assign_pointer(r->ctarget, n);
+                if (!n)
                     ops->unresolved_rules++;
-                }
             }
         }

@@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
            + nla_total_size(1) /* IFLA_LINKMODE */
            + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
            + nla_total_size(4) /* IFLA_LINK_NETNSID */
+           + nla_total_size(4) /* IFLA_GROUP */
            + nla_total_size(ext_filter_mask
                     & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
            + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */

@@ -1468,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
     [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
     [IFLA_PROTO_DOWN]   = { .type = NLA_U8 },
     [IFLA_XDP]          = { .type = NLA_NESTED },
+    [IFLA_GROUP]        = { .type = NLA_U32 },
 };

 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {

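Note on the rtnetlink hunks: every attribute the kernel emits in an RTM_NEWLINK message must be budgeted in if_nlmsg_size(), and every attribute userspace may send must have an ifla_policy[] entry so nla_parse() can validate its type and length; IFLA_GROUP had neither. For a u32 attribute the pairing looks like this sketch:

    /* Sizing: reserve header + padded payload for a u32 attribute. */
    size += nla_total_size(4);              /* IFLA_GROUP */

    /* Filling: must stay within the size computed above. */
    if (nla_put_u32(skb, IFLA_GROUP, dev->group))
        goto nla_put_failure;

    /* Parsing policy: reject malformed IFLA_GROUP from userspace. */
    static const struct nla_policy policy[IFLA_MAX + 1] = {
        [IFLA_GROUP] = { .type = NLA_U32 },
    };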
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
     call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }

-static inline void dnrt_drop(struct dn_route *rt)
-{
-    dst_release(&rt->dst);
-    call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void dn_dst_check_expire(unsigned long dummy)
 {
     int i;

@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
             }
             *rtp = rt->dst.dn_next;
             rt->dst.dn_next = NULL;
-            dnrt_drop(rt);
+            dnrt_free(rt);
             break;
         }
         spin_unlock_bh(&dn_rt_hash_table[i].lock);

@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
             dst_use(&rth->dst, now);
             spin_unlock_bh(&dn_rt_hash_table[hash].lock);

-            dnrt_drop(rt);
+            dst_free(&rt->dst);
             *rp = rth;
             return 0;
         }

@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
         for(; rt; rt = next) {
             next = rcu_dereference_raw(rt->dst.dn_next);
             RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-            dst_free((struct dst_entry *)rt);
+            dnrt_free(rt);
         }

 nothing_to_declare:

@@ -1187,7 +1181,7 @@ make_route:
     if (dev_out->flags & IFF_LOOPBACK)
         flags |= RTCF_LOCAL;

-    rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+    rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
     if (rt == NULL)
         goto e_nobufs;

@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
     pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
     if (!pmc)
         return;
+    spin_lock_init(&pmc->lock);
     spin_lock_bh(&im->lock);
     pmc->interface = im->interface;
     in_dev_hold(in_dev);

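Note on the igmpv3_add_delrec() hunk: pmc comes from kzalloc(), and all-zero memory is not a valid spinlock everywhere — with CONFIG_DEBUG_SPINLOCK the magic fields are unset, and some architectures use a non-zero unlocked value — so the lock must be initialized before the first spin_lock_bh() on it:

    pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
    if (!pmc)
        return;
    spin_lock_init(&pmc->lock);   /* required even after kzalloc() */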
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
     return 0;

 drop:
+    if (tun_dst)
+        dst_release((struct dst_entry *)tun_dst);
     kfree_skb(skb);
     return 0;
 }

@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
                   unsigned long delay)
 {
-    if (!delayed_work_pending(&ifp->dad_work))
-        in6_ifa_hold(ifp);
-    mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+    in6_ifa_hold(ifp);
+    if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+        in6_ifa_put(ifp);
 }

 static int snmp6_alloc_dev(struct inet6_dev *idev)

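Note on the addrconf_mod_dad_work() hunk: the old pending-check and mod_delayed_work() call were not atomic, so a work item firing in between could leak or underflow the refcount on ifp. The fixed version always takes a reference and relies on mod_delayed_work()'s return value: true means a pending item (which already owns a reference) was merely re-armed, so the extra hold is dropped. The pattern, sketched on a hypothetical refcounted object:

    static void schedule_obj_work(struct my_obj *obj, unsigned long delay)
    {
        refcount_inc(&obj->refcnt);          /* ref for the queued work */
        if (mod_delayed_work(wq, &obj->work, delay))
            refcount_dec(&obj->refcnt);      /* work was already queued and
                                              * already owns a reference */
    }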
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                    int flags, pol_lookup_t lookup)
 {
-    struct rt6_info *rt;
     struct fib_lookup_arg arg = {
         .lookup_ptr = lookup,
         .flags = FIB_LOOKUP_NOREF,

@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
     fib_rules_lookup(net->ipv6.fib6_rules_ops,
              flowi6_to_flowi(fl6), flags, &arg);

-    rt = arg.result;
+    if (arg.result)
+        return arg.result;

-    if (!rt) {
-        dst_hold(&net->ipv6.ip6_null_entry->dst);
-        return &net->ipv6.ip6_null_entry->dst;
-    }
-
-    if (rt->rt6i_flags & RTF_REJECT &&
-        rt->dst.error == -EAGAIN) {
-        ip6_rt_put(rt);
-        rt = net->ipv6.ip6_null_entry;
-        dst_hold(&rt->dst);
-    }
-
-    return &rt->dst;
+    dst_hold(&net->ipv6.ip6_null_entry->dst);
+    return &net->ipv6.ip6_null_entry->dst;
 }

 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,

@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
             flp6->saddr = saddr;
         }
         err = rt->dst.error;
-        goto out;
+        if (err != -EAGAIN)
+            goto out;
     }
 again:
     ip6_rt_put(rt);

@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
     struct rt6_info *rt;

     rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-    if (rt->rt6i_flags & RTF_REJECT &&
-        rt->dst.error == -EAGAIN) {
+    if (rt->dst.error == -EAGAIN) {
         ip6_rt_put(rt);
         rt = net->ipv6.ip6_null_entry;
         dst_hold(&rt->dst);

@@ -858,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
     return 0;

 drop:
+    if (tun_dst)
+        dst_release((struct dst_entry *)tun_dst);
     kfree_skb(skb);
     return 0;
 }

@@ -1246,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         fl6.flowi6_proto = IPPROTO_IPIP;
         fl6.daddr = key->u.ipv6.dst;
         fl6.flowlabel = key->label;
-        dsfield = ip6_tclass(key->label);
+        dsfield = key->tos;
     } else {
         if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
             encap_limit = t->parms.encap_limit;

@@ -1317,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         fl6.flowi6_proto = IPPROTO_IPV6;
         fl6.daddr = key->u.ipv6.dst;
         fl6.flowlabel = key->label;
-        dsfield = ip6_tclass(key->label);
+        dsfield = key->tos;
     } else {
         offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
         /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */

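Note on the collect_md hunks: key->label carries only the 20-bit IPv6 flow label, while the tunnel tos lives in key->tos, so dsfield = ip6_tclass(key->label) always computed zero and the configured tos was silently dropped. A userspace sketch of why (byte-order handling omitted; mask and shift follow the IPv6 flowinfo layout):

    #include <assert.h>
    #include <stdint.h>

    #define IPV6_TCLASS_MASK  0x0ff00000u
    #define IPV6_TCLASS_SHIFT 20

    static uint8_t ip6_tclass(uint32_t flowinfo)
    {
        return (flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
    }

    int main(void)
    {
        uint32_t label_only = 0x000fffffu;      /* flow label bits only */
        assert(ip6_tclass(label_only) == 0);    /* tos was always lost */
        return 0;
    }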
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
                        unsigned int *_toklen)
 {
     const __be32 *xdr = *_xdr;
-    unsigned int toklen = *_toklen, n_parts, loop, tmp;
+    unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;

     /* there must be at least one name, and at least #names+1 length
      * words */

@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
         toklen -= 4;
         if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
             return -EINVAL;
-        if (tmp > toklen)
+        paddedlen = (tmp + 3) & ~3;
+        if (paddedlen > toklen)
             return -EINVAL;
         princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
         if (!princ->name_parts[loop])
             return -ENOMEM;
         memcpy(princ->name_parts[loop], xdr, tmp);
         princ->name_parts[loop][tmp] = 0;
-        tmp = (tmp + 3) & ~3;
-        toklen -= tmp;
-        xdr += tmp >> 2;
+        toklen -= paddedlen;
+        xdr += paddedlen >> 2;
     }

     if (toklen < 4)

@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
     toklen -= 4;
     if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
         return -EINVAL;
-    if (tmp > toklen)
+    paddedlen = (tmp + 3) & ~3;
+    if (paddedlen > toklen)
         return -EINVAL;
     princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
     if (!princ->realm)
         return -ENOMEM;
     memcpy(princ->realm, xdr, tmp);
     princ->realm[tmp] = 0;
-    tmp = (tmp + 3) & ~3;
-    toklen -= tmp;
-    xdr += tmp >> 2;
+    toklen -= paddedlen;
+    xdr += paddedlen >> 2;

     _debug("%s/...@%s", princ->name_parts[0], princ->realm);

@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
                      unsigned int *_toklen)
 {
     const __be32 *xdr = *_xdr;
-    unsigned int toklen = *_toklen, len;
+    unsigned int toklen = *_toklen, len, paddedlen;

     /* there must be at least one tag and one length word */
     if (toklen <= 8)

@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
     toklen -= 8;
     if (len > max_data_size)
         return -EINVAL;
+    paddedlen = (len + 3) & ~3;
+    if (paddedlen > toklen)
+        return -EINVAL;
     td->data_len = len;

     if (len > 0) {
         td->data = kmemdup(xdr, len, GFP_KERNEL);
         if (!td->data)
             return -ENOMEM;
-        len = (len + 3) & ~3;
-        toklen -= len;
-        xdr += len >> 2;
+        toklen -= paddedlen;
+        xdr += paddedlen >> 2;
     }

     _debug("tag %x len %x", td->tag, td->data_len);

@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
                     const __be32 **_xdr, unsigned int *_toklen)
 {
     const __be32 *xdr = *_xdr;
-    unsigned int toklen = *_toklen, len;
+    unsigned int toklen = *_toklen, len, paddedlen;

     /* there must be at least one length word */
     if (toklen <= 4)

@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
     toklen -= 4;
     if (len > AFSTOKEN_K5_TIX_MAX)
         return -EINVAL;
+    paddedlen = (len + 3) & ~3;
+    if (paddedlen > toklen)
+        return -EINVAL;
     *_tktlen = len;

     _debug("ticket len %u", len);

@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
         *_ticket = kmemdup(xdr, len, GFP_KERNEL);
         if (!*_ticket)
             return -ENOMEM;
-        len = (len + 3) & ~3;
-        toklen -= len;
-        xdr += len >> 2;
+        toklen -= paddedlen;
+        xdr += paddedlen >> 2;
     }

     *_xdr = xdr;

@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 {
     const __be32 *xdr = prep->data, *token;
     const char *cp;
-    unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+    unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
     size_t datalen = prep->datalen;
     int ret;

@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
     if (len < 1 || len > AFSTOKEN_CELL_MAX)
         goto not_xdr;
     datalen -= 4;
-    tmp = (len + 3) & ~3;
-    if (tmp > datalen)
+    paddedlen = (len + 3) & ~3;
+    if (paddedlen > datalen)
         goto not_xdr;

     cp = (const char *) xdr;
     for (loop = 0; loop < len; loop++)
         if (!isprint(cp[loop]))
             goto not_xdr;
-    if (len < tmp)
-        for (; loop < tmp; loop++)
-            if (cp[loop])
-                goto not_xdr;
+    for (; loop < paddedlen; loop++)
+        if (cp[loop])
+            goto not_xdr;
     _debug("cellname: [%u/%u] '%*.*s'",
-           len, tmp, len, len, (const char *) xdr);
-    datalen -= tmp;
-    xdr += tmp >> 2;
+           len, paddedlen, len, len, (const char *) xdr);
+    datalen -= paddedlen;
+    xdr += paddedlen >> 2;

     /* get the token count */
     if (datalen < 12)

@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
         sec_ix = ntohl(*xdr);
         datalen -= 4;
         _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-        if (toklen < 20 || toklen > datalen)
+        paddedlen = (toklen + 3) & ~3;
+        if (toklen < 20 || toklen > datalen || paddedlen > datalen)
             goto not_xdr;
-        datalen -= (toklen + 3) & ~3;
-        xdr += (toklen + 3) >> 2;
+        datalen -= paddedlen;
+        xdr += paddedlen >> 2;

     } while (--loop > 0);

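Note on the rxrpc key hunks: each site checked the raw length against the remaining bytes but then subtracted the 4-byte-padded length, so a length landing within the last three bytes of the token passed the check and made the unsigned counter wrap around. Computing paddedlen once and validating it closes the underflow; a userspace demonstration:

    #include <assert.h>

    int main(void)
    {
        unsigned int toklen = 6, len = 5;
        unsigned int paddedlen = (len + 3) & ~3u;   /* 8 */

        assert(len <= toklen);          /* old check passes... */
        assert(paddedlen > toklen);     /* ...but the padded size doesn't fit */
        /* old code: toklen -= paddedlen would wrap to ~4 billion */
        return 0;
    }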
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
     if (sctp_sk(sk)->bind_hash)
         sctp_put_port(sk);

+    sctp_sk(sk)->ep = NULL;
     sock_put(sk);
 }

@@ -278,7 +278,6 @@ out:

 static int sctp_sock_dump(struct sock *sk, void *p)
 {
-    struct sctp_endpoint *ep = sctp_sk(sk)->ep;
     struct sctp_comm_param *commp = p;
     struct sk_buff *skb = commp->skb;
     struct netlink_callback *cb = commp->cb;

@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
     int err = 0;

     lock_sock(sk);
-    list_for_each_entry(assoc, &ep->asocs, asocs) {
+    if (!sctp_sk(sk)->ep)
+        goto release;
+    list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
         if (cb->args[4] < cb->args[1])
             goto next;

@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
     if (err)
         return err;

-    sctp_transport_get_idx(net, &hti, pos);
-    obj = sctp_transport_get_next(net, &hti);
-    for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+    obj = sctp_transport_get_idx(net, &hti, pos + 1);
+    for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
         struct sctp_transport *transport = obj;

         if (!sctp_transport_hold(transport))

@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
  * Main IOCTl dispatcher.
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
                   unsigned int cmd,
                   struct iw_request_info *info,
                   wext_ioctl_func standard,
                   wext_ioctl_func private)
 {
-    struct iwreq *iwr = (struct iwreq *) ifr;
     struct net_device *dev;
     iw_handler handler;

@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
      * The copy_to/from_user() of ifr is also dealt with in there */

     /* Make sure the device exist */
-    if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL)
+    if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
         return -ENODEV;

     /* A bunch of special cases, then the generic case...

@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
         else if (private)
             return private(dev, iwr, cmd, info, handler);
     }
-    /* Old driver API : call driver ioctl handler */
-    if (dev->netdev_ops->ndo_do_ioctl)
-        return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
     return -EOPNOTSUPP;
 }

@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
 }

 /* entry point from dev ioctl */
-static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
                    unsigned int cmd, struct iw_request_info *info,
                    wext_ioctl_func standard,
                    wext_ioctl_func private)

@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
     if (ret)
         return ret;

-    dev_load(net, ifr->ifr_name);
+    dev_load(net, iwr->ifr_name);
     rtnl_lock();
-    ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
+    ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
     rtnl_unlock();

     return ret;

@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device *dev,
 }


-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
               void __user *arg)
 {
     struct iw_request_info info = { .cmd = cmd, .flags = 0 };
     int ret;

-    ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
+    ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
                   ioctl_standard_call,
                   ioctl_private_call);
     if (ret >= 0 &&
         IW_IS_GET(cmd) &&
-        copy_to_user(arg, ifr, sizeof(struct iwreq)))
+        copy_to_user(arg, iwr, sizeof(struct iwreq)))
         return -EFAULT;

     return ret;

@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
     info.cmd = cmd;
     info.flags = IW_REQUEST_FLAG_COMPAT;

-    ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
+    ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
                   compat_standard_call,
                   compat_private_call);
