Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Fix memory leak in xfrm_state code, from Steffen Klassert.

 2) Fix races between devlink reload operations and device
    setup/cleanup, from Jiri Pirko.

 3) Null deref in NFC code, from Stephan Gerhold.

 4) Refcount fixes in SMC, from Ursula Braun.

 5) Memory leak in slcan open error paths, from Jouni Hogander.

 6) Fix ETS bandwidth validation in hns3, from Yonglong Liu.

 7) Info leak on short USB request answers in ax88172a driver, from
    Oliver Neukum.

 8) Release mem region properly in ep93xx_eth, from Chuhong Yuan.

 9) PTP config timestamp flags validation, from Richard Cochran.

10) Dangling pointers after SKB data realloc in seg6, from Andrea Mayer.

11) Missing free_netdev() in gemini driver, from Chuhong Yuan.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (56 commits)
  ipmr: Fix skb headroom in ipmr_get_route().
  net: hns3: cleanup of stray struct hns3_link_mode_mapping
  net/smc: fix fastopen for non-blocking connect()
  rds: ib: update WR sizes when bringing up connection
  net: gemini: add missed free_netdev
  net: dsa: tag_8021q: Fix dsa_8021q_restore_pvid for an absent pvid
  seg6: fix skb transport_header after decap_and_validate()
  seg6: fix srh pointer in get_srh()
  net: stmmac: Use the correct style for SPDX License Identifier
  octeontx2-af: Use the correct style for SPDX License Identifier
  ptp: Extend the test program to check the external time stamp flags.
  mlx5: Reject requests to enable time stamping on both edges.
  igb: Reject requests that fail to enable time stamping on both edges.
  dp83640: Reject requests to enable time stamping on both edges.
  mv88e6xxx: Reject requests to enable time stamping on both edges.
  ptp: Introduce strict checking of external time stamp options.
  renesas: reject unsupported external timestamp flags
  mlx5: reject unsupported external timestamp flags
  igb: reject unsupported external timestamp flags
  dp83640: reject unsupported external timestamp flags
  ...
Linus Torvalds 2019-11-16 15:52:00 -08:00
commit 8be636dd8a
62 changed files with 482 additions and 125 deletions


@ -617,6 +617,7 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
free_netdev(sl->dev);
err_exit:
rtnl_unlock();


@ -273,6 +273,19 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)


@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
if (rq->perout.index != 0)
return -EINVAL;


@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
struct resource *mem;
dev = platform_get_drvdata(pdev);
if (dev == NULL)
@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
iounmap(ep->base_addr);
if (ep->res != NULL) {
release_resource(ep->res);
kfree(ep->res);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
}
free_netdev(dev);


@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
free_netdev(port->netdev);
return 0;
}


@ -2232,8 +2232,16 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
if (err == -EPROBE_DEFER)
if (err == -EPROBE_DEFER) {
for (i = 0; i < priv->num_channels; i++) {
channel = priv->channel[i];
nctx = &channel->nctx;
dpaa2_io_service_deregister(channel->dpio, nctx, dev);
free_channel(priv, channel);
}
priv->num_channels = 0;
return err;
}
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");


@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
struct hns3_link_mode_mapping {
u32 hns3_link_mode;
u32 ethtool_link_mode;
};
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);


@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (ret)
return ret;
for (i = 0; i < HNAE3_MAX_TC; i++) {
for (i = 0; i < hdev->tc_max; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hclge_tm_pfc_info_update(hdev);
return hclge_pause_setup_hw(hdev, false);
ret = hclge_pause_setup_hw(hdev, false);
if (ret)
return ret;
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
ret = hclge_buffer_alloc(hdev);
if (ret) {
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
/* DCBX configuration */


@ -6263,11 +6263,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
/* read current config parameter */
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
false);
true);
req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
req->func_id = cpu_to_le32(func_id);
req->switch_param = switch_param;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"read mac vlan switch parameter fail, ret = %d\n", ret);
return ret;
}
/* modify and write new config parameter */
hclge_cmd_reuse_desc(&desc, false);
req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);


@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 CGX driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 CGX driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 CGX driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
* Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*


@ -4010,6 +4010,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
devlink_params_publish(devlink);
devlink_reload_enable(devlink);
pci_save_state(pdev);
return 0;
@ -4121,6 +4122,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
devlink_reload_disable(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;


@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;


@ -1189,6 +1189,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (mlxsw_driver->params_register)
devlink_params_publish(devlink);
if (!reload)
devlink_reload_enable(devlink);
return 0;
err_thermal_init:
@ -1249,6 +1252,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (!reload)
devlink_reload_disable(devlink);
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the


@ -429,6 +429,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
int pulse_width = 0;
int perout_bit = 0;
/* Reject requests with unsupported flags */
if (perout->flags)
return -EOPNOTSUPP;
if (!on) {
lan743x_ptp_perout_off(adapter);
return 0;


@ -955,6 +955,8 @@ enum RAVB_QUEUE {
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
/* TX descriptors per packet */
#define NUM_TX_DESC_GEN2 2
#define NUM_TX_DESC_GEN3 1
@ -1018,7 +1020,6 @@ struct ravb_private {
u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
u32 cur_tx[NUM_TX_QUEUE];
u32 dirty_tx[NUM_TX_QUEUE];
u32 rx_buf_sz; /* Based on MTU+slack. */
struct napi_struct napi[NUM_RX_QUEUE];
struct work_struct work;
/* MII transceiver section. */


@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
priv->rx_buf_sz,
RX_BUF_SZ,
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
priv->rx_buf_sz,
RX_BUF_SZ,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int ring_size;
int i;
priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
priv->rx_buf_sz,
RX_BUF_SZ,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
priv->rx_buf_sz +
RX_BUF_SZ +
RAVB_ALIGN - 1);
if (!skb)
break; /* Better luck next round. */
@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
if (netif_running(ndev))
return -EBUSY;
struct ravb_private *priv = netdev_priv(ndev);
ndev->mtu = new_mtu;
if (netif_running(ndev)) {
synchronize_irq(priv->emac_irq);
ravb_emac_init(ndev);
}
netdev_update_features(ndev);
return 0;


@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
/* Reject requests with unsupported flags */
if (req->flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
if (req->index)
return -EINVAL;
@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
/* Reject requests with unsupported flags */
if (req->flags)
return -EOPNOTSUPP;
if (req->index)
return -EINVAL;


@ -1226,7 +1226,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
dwmac_mux:
sun8i_dwmac_unset_syscon(gmac);
dwmac_exit:
sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
stmmac_pltfr_remove(pdev);
return ret;
}


@ -1,4 +1,4 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores


@ -1,4 +1,4 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
* stmmac XGMAC definitions.


@ -1,4 +1,4 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
// stmmac HW Interface Callbacks


@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
cfg = &priv->pps[rq->perout.index];
cfg->start.tv_sec = rq->perout.start.sec;


@ -708,6 +708,7 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
goto err_debugfs_exit;
devlink_params_publish(devlink);
devlink_reload_enable(devlink);
return nsim_dev;
err_debugfs_exit:
@ -732,6 +733,7 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
{
struct devlink *devlink = priv_to_devlink(nsim_dev);
devlink_reload_disable(devlink);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
nsim_dev_traps_exit(devlink);


@ -469,6 +469,19 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
index = rq->extts.index;
if (index >= N_EXT_TS)
return -EINVAL;
@ -491,6 +504,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
if (rq->perout.index >= N_PER_OUT)
return -EINVAL;
return periodic_output(clock, rq, on, rq->perout.index);


@ -64,11 +64,12 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
if (mdiodev->dev.of_node)
reset = devm_reset_control_get_exclusive(&mdiodev->dev,
"phy");
if (PTR_ERR(reset) == -ENOENT ||
PTR_ERR(reset) == -ENOTSUPP)
if (IS_ERR(reset)) {
if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS)
reset = NULL;
else if (IS_ERR(reset))
else
return PTR_ERR(reset);
}
mdiodev->reset_ctrl = reset;


@ -855,6 +855,7 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
free_netdev(sl->dev);
err_exit:
rtnl_unlock();


@ -196,7 +196,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < 0) {
if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
goto free;
}


@ -579,7 +579,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
if (err < sizeof(max_datagram_size)) {
if (err != sizeof(max_datagram_size)) {
dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
goto out;
}


@ -1371,6 +1371,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */


@ -251,27 +251,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
u16 length, iv_len, amsdu_pad;
u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
struct page **page_ptr;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
iv_len = ieee80211_has_protected(hdr->frame_control) ?
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
/* total amount of header we may need for this A-MSDU */
hdr_room = DIV_ROUND_UP(total_len, mss) *
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
hdr_page = get_page_hdr(trans, hdr_room);
@ -282,14 +278,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
start_hdr = hdr_page->pos;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
*page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
/*
* Pull the ieee80211 header + IV to be able to use TSO core,
* Pull the ieee80211 header to be able to use TSO core,
* we will restore it for the tx_status flow.
*/
skb_pull(skb, hdr_len + iv_len);
skb_pull(skb, hdr_len);
/*
* Remove the length of all the headers that we don't actually
@ -364,8 +358,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
}
}
/* re -add the WiFi header and IV */
skb_push(skb, hdr_len + iv_len);
/* re -add the WiFi header */
skb_push(skb, hdr_len);
return 0;


@ -220,8 +220,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
if (r == -EREMOTEIO) {
phy->hard_fault = r;
skb = NULL;
} else if (r < 0) {
if (info->mode == NXP_NCI_MODE_FW)
nxp_nci_fw_recv_frame(phy->ndev, NULL);
}
if (r < 0) {
nfc_err(&client->dev, "Read failed with error %d\n", r);
goto exit_irq_handled;
}


@ -149,11 +149,21 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
}
if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
req.extts.rsv[0] || req.extts.rsv[1]) &&
cmd == PTP_EXTTS_REQUEST2) {
if (cmd == PTP_EXTTS_REQUEST2) {
/* Tell the drivers to check the flags carefully. */
req.extts.flags |= PTP_STRICT_FLAGS;
/* Make sure no reserved bit is set. */
if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
req.extts.rsv[0] || req.extts.rsv[1]) {
err = -EINVAL;
break;
}
/* Ensure one of the rising/falling edge bits is set. */
if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
(req.extts.flags & PTP_EXTTS_EDGES) == 0) {
err = -EINVAL;
break;
}
} else if (cmd == PTP_EXTTS_REQUEST) {
req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
req.extts.rsv[0] = 0;


@ -65,5 +65,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
void can_sock_destruct(struct sock *sk);
#endif /* !_CAN_CORE_H */


@ -38,7 +38,8 @@ struct devlink {
struct device *dev;
possible_net_t _net;
struct mutex lock;
bool reload_failed;
u8 reload_failed:1,
reload_enabled:1;
char priv[0] __aligned(NETDEV_ALIGN);
};
@ -774,6 +775,8 @@ struct ib_device;
struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
int devlink_register(struct devlink *devlink, struct device *dev);
void devlink_unregister(struct devlink *devlink);
void devlink_reload_enable(struct devlink *devlink);
void devlink_reload_disable(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,

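As a usage sketch (hypothetical driver, invented helper names, not taken from this series): the intended pairing of the two new calls is to enable reload as the very last step of device setup and to disable it as the very first step of teardown, which closes the reload-vs-setup/cleanup race summarized in point 2 of the pull message.

/* Illustration only; struct foo_dev and the foo_* helpers are made up. */
static int foo_probe(struct foo_dev *fdev)
{
	int err;

	err = foo_setup_hw(fdev);
	if (err)
		return err;

	/* Last init step: reload requests are accepted from here on. */
	devlink_reload_enable(fdev->devlink);
	return 0;
}

static void foo_remove(struct foo_dev *fdev)
{
	/* First cleanup step: no reload can race with the teardown below. */
	devlink_reload_disable(fdev->devlink);
	foo_teardown_hw(fdev);
}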

@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
show_tcp_state_name(__entry->state))


@ -421,6 +421,7 @@ enum devlink_attr {
DEVLINK_ATTR_RELOAD_FAILED, /* u8 0 or 1 */
DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS, /* u64 */
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,


@ -31,13 +31,16 @@
#define PTP_ENABLE_FEATURE (1<<0)
#define PTP_RISING_EDGE (1<<1)
#define PTP_FALLING_EDGE (1<<2)
#define PTP_STRICT_FLAGS (1<<3)
#define PTP_EXTTS_EDGES (PTP_RISING_EDGE | PTP_FALLING_EDGE)
/*
* flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl.
*/
#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \
PTP_RISING_EDGE | \
PTP_FALLING_EDGE)
PTP_FALLING_EDGE | \
PTP_STRICT_FLAGS)
/*
* flag fields valid for the original PTP_EXTTS_REQUEST ioctl.

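For context, a minimal user-space sketch (not part of this series; it assumes a /dev/ptp0 clock whose driver supports external time stamps). As the ptp_chardev.c hunk above shows, the core ORs PTP_STRICT_FLAGS into every PTP_EXTTS_REQUEST2 call, so a driver that cannot honor rising-edge-only capture now returns EOPNOTSUPP instead of silently time stamping both edges:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* clock index 0 is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	/* The v2 ioctl is checked strictly: unsupported edge selections are
	 * refused rather than quietly approximated.
	 */
	if (ioctl(fd, PTP_EXTTS_REQUEST2, &req))
		perror("PTP_EXTTS_REQUEST2");

	close(fd);
	return 0;
}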

@ -86,11 +86,12 @@ static atomic_t skbcounter = ATOMIC_INIT(0);
/* af_can socket functions */
static void can_sock_destruct(struct sock *sk)
void can_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
}
EXPORT_SYMBOL(can_sock_destruct);
static const struct can_proto *can_get_proto(int protocol)
{


@ -51,6 +51,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
if (!skb)
return;
j1939_priv_get(priv);
can_skb_set_owner(skb, iskb->sk);
/* get a pointer to the header of the skb
@ -104,6 +105,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
j1939_simple_recv(priv, skb);
j1939_sk_recv(priv, skb);
done:
j1939_priv_put(priv);
kfree_skb(skb);
}
@ -150,6 +152,10 @@ static void __j1939_priv_release(struct kref *kref)
netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv);
WARN_ON_ONCE(!list_empty(&priv->active_session_list));
WARN_ON_ONCE(!list_empty(&priv->ecus));
WARN_ON_ONCE(!list_empty(&priv->j1939_socks));
dev_put(ndev);
kfree(priv);
}
@ -207,6 +213,9 @@ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
{
struct can_ml_priv *can_ml_priv = ndev->ml_priv;
if (!can_ml_priv)
return NULL;
return can_ml_priv->j1939_priv;
}


@ -78,7 +78,6 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
jsk->state |= J1939_SOCK_BOUND;
j1939_priv_get(priv);
jsk->priv = priv;
spin_lock_bh(&priv->j1939_socks_lock);
list_add_tail(&jsk->list, &priv->j1939_socks);
@ -91,7 +90,6 @@ static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
list_del_init(&jsk->list);
spin_unlock_bh(&priv->j1939_socks_lock);
jsk->priv = NULL;
j1939_priv_put(priv);
jsk->state &= ~J1939_SOCK_BOUND;
}
@ -349,6 +347,34 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
spin_unlock_bh(&priv->j1939_socks_lock);
}
static void j1939_sk_sock_destruct(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
/* This function will be called by the generic networking code when
* the socket is ultimately closed (sk->sk_destruct).
*
* The race between
* - processing a received CAN frame
* (can_receive -> j1939_can_recv)
* and accessing j1939_priv
* ... and ...
* - closing a socket
* (j1939_can_rx_unregister -> can_rx_unregister)
* and calling the final j1939_priv_put()
*
* is avoided by calling the final j1939_priv_put() from this
* RCU deferred cleanup call.
*/
if (jsk->priv) {
j1939_priv_put(jsk->priv);
jsk->priv = NULL;
}
/* call generic CAN sock destruct */
can_sock_destruct(sk);
}
static int j1939_sk_init(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
@ -371,6 +397,7 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
sk->sk_destruct = j1939_sk_sock_destruct;
return 0;
}
@ -443,6 +470,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
}
jsk->ifindex = addr->can_ifindex;
/* the corresponding j1939_priv_put() is called via
* sk->sk_destruct, which points to j1939_sk_sock_destruct()
*/
j1939_priv_get(priv);
jsk->priv = priv;
}
/* set default transmit pgn */
@ -560,8 +593,8 @@ static int j1939_sk_release(struct socket *sock)
if (!sk)
return 0;
jsk = j1939_sk(sk);
lock_sock(sk);
jsk = j1939_sk(sk);
if (jsk->state & J1939_SOCK_BOUND) {
struct j1939_priv *priv = jsk->priv;
@ -1059,51 +1092,72 @@ static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct j1939_sock *jsk = j1939_sk(sk);
struct j1939_priv *priv = jsk->priv;
struct j1939_priv *priv;
int ifindex;
int ret;
lock_sock(sock->sk);
/* various socket state tests */
if (!(jsk->state & J1939_SOCK_BOUND))
return -EBADFD;
if (!(jsk->state & J1939_SOCK_BOUND)) {
ret = -EBADFD;
goto sendmsg_done;
}
priv = jsk->priv;
ifindex = jsk->ifindex;
if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR)
if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
/* no source address assigned yet */
return -EBADFD;
ret = -EBADFD;
goto sendmsg_done;
}
/* deal with provided destination address info */
if (msg->msg_name) {
struct sockaddr_can *addr = msg->msg_name;
if (msg->msg_namelen < J1939_MIN_NAMELEN)
return -EINVAL;
if (msg->msg_namelen < J1939_MIN_NAMELEN) {
ret = -EINVAL;
goto sendmsg_done;
}
if (addr->can_family != AF_CAN)
return -EINVAL;
if (addr->can_family != AF_CAN) {
ret = -EINVAL;
goto sendmsg_done;
}
if (addr->can_ifindex && addr->can_ifindex != ifindex)
return -EBADFD;
if (addr->can_ifindex && addr->can_ifindex != ifindex) {
ret = -EBADFD;
goto sendmsg_done;
}
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
!j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
return -EINVAL;
!j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
ret = -EINVAL;
goto sendmsg_done;
}
if (!addr->can_addr.j1939.name &&
addr->can_addr.j1939.addr == J1939_NO_ADDR &&
!sock_flag(sk, SOCK_BROADCAST))
!sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
return -EACCES;
ret = -EACCES;
goto sendmsg_done;
}
} else {
if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
!sock_flag(sk, SOCK_BROADCAST))
!sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
return -EACCES;
ret = -EACCES;
goto sendmsg_done;
}
}
ret = j1939_sk_send_loop(priv, sk, msg, size);
sendmsg_done:
release_sock(sock->sk);
return ret;
}


@ -255,6 +255,7 @@ static void __j1939_session_drop(struct j1939_session *session)
return;
j1939_sock_pending_del(session->sk);
sock_put(session->sk);
}
static void j1939_session_destroy(struct j1939_session *session)
@ -266,6 +267,9 @@ static void j1939_session_destroy(struct j1939_session *session)
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
skb_queue_purge(&session->skb_queue);
__j1939_session_drop(session);
j1939_priv_put(session->priv);
@ -1042,12 +1046,13 @@ j1939_session_deactivate_activate_next(struct j1939_session *session)
j1939_sk_queue_activate_next(session);
}
static void j1939_session_cancel(struct j1939_session *session,
static void __j1939_session_cancel(struct j1939_session *session,
enum j1939_xtp_abort err)
{
struct j1939_priv *priv = session->priv;
WARN_ON_ONCE(!err);
lockdep_assert_held(&session->priv->active_session_list_lock);
session->err = j1939_xtp_abort_to_errno(priv, err);
/* do not send aborts on incoming broadcasts */
@ -1062,6 +1067,20 @@ static void j1939_session_cancel(struct j1939_session *session,
j1939_sk_send_loop_abort(session->sk, session->err);
}
static void j1939_session_cancel(struct j1939_session *session,
enum j1939_xtp_abort err)
{
j1939_session_list_lock(session->priv);
if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_WAITING_ABORT) {
j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
__j1939_session_cancel(session, err);
}
j1939_session_list_unlock(session->priv);
}
static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
{
struct j1939_session *session =
@ -1108,8 +1127,6 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n",
__func__, session, ret);
if (session->skcb.addr.type != J1939_SIMPLE) {
j1939_tp_set_rxtimeout(session,
J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_OTHER);
} else {
session->err = ret;
@ -1169,7 +1186,7 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
HRTIMER_MODE_REL_SOFT);
j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
__j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
}
j1939_session_list_unlock(session->priv);
}
@ -1375,7 +1392,6 @@ j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb)
out_session_cancel:
j1939_session_timers_cancel(session);
j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, err);
}
@ -1572,7 +1588,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
/* RTS on active session */
j1939_session_timers_cancel(session);
j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
}
@ -1583,7 +1598,6 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
session->last_cmd);
j1939_session_timers_cancel(session);
j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
return -EBUSY;
@ -1785,7 +1799,6 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
out_session_cancel:
j1939_session_timers_cancel(session);
j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_put(session);
}
@ -1866,6 +1879,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
return ERR_PTR(-ENOMEM);
/* skb is refcounted in j1939_session_new() */
sock_hold(skb->sk);
session->sk = skb->sk;
session->transmission = true;
session->pkt.total = (size + 6) / 7;
@ -2028,7 +2042,11 @@ int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk)
&priv->active_session_list,
active_session_list_entry) {
if (!sk || sk == session->sk) {
j1939_session_timers_cancel(session);
if (hrtimer_try_to_cancel(&session->txtimer) == 1)
j1939_session_put(session);
if (hrtimer_try_to_cancel(&session->rxtimer) == 1)
j1939_session_put(session);
session->err = ESHUTDOWN;
j1939_session_deactivate_locked(session);
}


@ -2699,7 +2699,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
struct devlink *devlink = info->user_ptr[0];
int err;
if (!devlink_reload_supported(devlink))
if (!devlink_reload_supported(devlink) || !devlink->reload_enabled)
return -EOPNOTSUPP;
err = devlink_resources_validate(devlink, NULL, info);
@ -4618,6 +4618,7 @@ struct devlink_health_reporter {
bool auto_recover;
u8 health_state;
u64 dump_ts;
u64 dump_real_ts;
u64 error_count;
u64 recovery_count;
u64 last_recovery_ts;
@ -4790,6 +4791,7 @@ static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
goto dump_err;
reporter->dump_ts = jiffies;
reporter->dump_real_ts = ktime_get_real_ns();
return 0;
@ -4952,6 +4954,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
jiffies_to_msecs(reporter->dump_ts),
DEVLINK_ATTR_PAD))
goto reporter_nest_cancel;
if (reporter->dump_fmsg &&
nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
reporter->dump_real_ts, DEVLINK_ATTR_PAD))
goto reporter_nest_cancel;
nla_nest_end(msg, reporter_attr);
genlmsg_end(msg, hdr);
@ -6196,12 +6202,49 @@ EXPORT_SYMBOL_GPL(devlink_register);
void devlink_unregister(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
WARN_ON(devlink_reload_supported(devlink) &&
devlink->reload_enabled);
devlink_notify(devlink, DEVLINK_CMD_DEL);
list_del(&devlink->list);
mutex_unlock(&devlink_mutex);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
/**
* devlink_reload_enable - Enable reload of devlink instance
*
* @devlink: devlink
*
* Should be called at end of device initialization
* process when reload operation is supported.
*/
void devlink_reload_enable(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
devlink->reload_enabled = true;
mutex_unlock(&devlink_mutex);
}
EXPORT_SYMBOL_GPL(devlink_reload_enable);
/**
* devlink_reload_disable - Disable reload of devlink instance
*
* @devlink: devlink
*
* Should be called at the beginning of device cleanup
* process when reload operation is supported.
*/
void devlink_reload_disable(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
/* Mutex is taken which ensures that no reload operation is in
* progress while the reload_enabled flag is being cleared.
*/
devlink->reload_enabled = false;
mutex_unlock(&devlink_mutex);
}
EXPORT_SYMBOL_GPL(devlink_reload_disable);
/**
* devlink_free - Free devlink instance resources
*


@ -106,7 +106,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
slave = ds->ports[port].slave;
err = br_vlan_get_pvid(slave, &pvid);
if (err < 0)
if (!pvid || err < 0)
/* There is no pvid on the bridge for this port, which is
* perfectly valid. Nothing to restore, bye-bye!
*/


@ -2289,7 +2289,8 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
rcu_read_unlock();
return -ENODEV;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
if (!skb2) {
read_unlock(&mrt_lock);
rcu_read_unlock();


@ -81,6 +81,11 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
if (!pskb_may_pull(skb, srhoff + len))
return NULL;
/* note that pskb_may_pull may change pointers in header;
* for this reason it is necessary to reload them when needed.
*/
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
if (!seg6_validate_srh(srh, len))
return NULL;
@ -336,6 +341,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
if (!ipv6_addr_any(&slwt->nh6))
nhaddr = &slwt->nh6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
seg6_lookup_nexthop(skb, nhaddr, 0);
return dst_input(skb);
@ -365,6 +372,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
skb_dst_drop(skb);
skb_set_transport_header(skb, sizeof(struct iphdr));
err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
if (err)
goto drop;
@ -385,6 +394,8 @@ static int input_action_end_dt6(struct sk_buff *skb,
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
seg6_lookup_nexthop(skb, NULL, slwt->table);
return dst_input(skb);

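The rule the new comment states, sketched generically (the struct and offset names here are invented, not from the patch): any pointer derived from skb->data before pskb_may_pull() is stale afterwards, because the call may reallocate the header area.

/* Illustration only; foo_hdr and foooff are hypothetical. */
static struct foo_hdr *get_foo_hdr(struct sk_buff *skb, int foooff, int len)
{
	if (!pskb_may_pull(skb, foooff + len))
		return NULL;

	/* Recompute the pointer only after pskb_may_pull(), which may have
	 * moved skb->data.
	 */
	return (struct foo_hdr *)(skb->data + foooff);
}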

@ -450,6 +450,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev;
unsigned long max_wrs;
int ret, fr_queue_space;
/*
@ -469,10 +470,15 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
if (ic->i_send_ring.w_nr != max_wrs)
rds_ib_ring_resize(&ic->i_send_ring, max_wrs);
max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
if (ic->i_recv_ring.w_nr != max_wrs)
rds_ib_ring_resize(&ic->i_recv_ring, max_wrs);
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
@ -1099,8 +1105,9 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ic->i_flowctl = 0;
atomic_set(&ic->i_credits, 0);
rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
/* Re-init rings, but retain sizes. */
rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr);
rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr);
if (ic->i_ibinc) {
rds_inc_put(&ic->i_ibinc->ii_inc);
@ -1147,8 +1154,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
* rds_ib_conn_shutdown() waits for these to be emptied so they
* must be initialized before it can be called.
*/
rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
rds_ib_ring_init(&ic->i_send_ring, 0);
rds_ib_ring_init(&ic->i_recv_ring, 0);
ic->conn = conn;
conn->c_transport_data = ic;


@ -796,6 +796,7 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = EPIPE;
else if (signal_pending(current))
smc->sk.sk_err = -sock_intr_errno(timeo);
sock_put(&smc->sk); /* passive closing */
goto out;
}
@ -1731,7 +1732,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_KEY:
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
if (sk->sk_state == SMC_INIT) {
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {


@ -34,8 +34,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "core.h"
#include "name_table.h"
#include "subscr.h"


@ -60,6 +60,12 @@
#include <linux/rhashtable.h>
#include <net/genetlink.h>
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
struct tipc_node;
struct tipc_bearer;
struct tipc_bc_base;


@ -480,6 +480,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
else
XFRM_INC_STATS(net,
LINUX_MIB_XFRMINSTATEINVALID);
if (encap_type == -1)
dev_put(skb->dev);
goto drop;
}


@ -495,6 +495,8 @@ static void ___xfrm_state_destroy(struct xfrm_state *x)
x->type->destructor(x);
xfrm_put_type(x->type);
}
if (x->xfrag.page)
put_page(x->xfrag.page);
xfrm_dev_state_free(x);
security_xfrm_state_free(x);
xfrm_state_free(x);


@ -112,14 +112,16 @@ sanitization_single_dev_mcast_group_test()
RET=0
ip link add dev br0 type bridge mcast_snooping 0
ip link add name dummy1 up type dummy
ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
dev $swp2 group 239.0.0.1
dev dummy1 group 239.0.0.1
sanitization_single_dev_test_fail
ip link del dev vxlan0
ip link del dev dummy1
ip link del dev br0
log_test "vxlan device with a multicast group"
@ -181,13 +183,15 @@ sanitization_single_dev_local_interface_test()
RET=0
ip link add dev br0 type bridge mcast_snooping 0
ip link add name dummy1 up type dummy
ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev $swp2
ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev dummy1
sanitization_single_dev_test_fail
ip link del dev vxlan0
ip link del dev dummy1
ip link del dev br0
log_test "vxlan device with local interface"


@ -44,6 +44,46 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
}
#endif
static void show_flag_test(int rq_index, unsigned int flags, int err)
{
printf("PTP_EXTTS_REQUEST%c flags 0x%08x : (%d) %s\n",
rq_index ? '1' + rq_index : ' ',
flags, err, strerror(errno));
/* sigh, uClibc ... */
errno = 0;
}
static void do_flag_test(int fd, unsigned int index)
{
struct ptp_extts_request extts_request;
unsigned long request[2] = {
PTP_EXTTS_REQUEST,
PTP_EXTTS_REQUEST2,
};
unsigned int enable_flags[5] = {
PTP_ENABLE_FEATURE,
PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
PTP_ENABLE_FEATURE | PTP_FALLING_EDGE,
PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_FALLING_EDGE,
PTP_ENABLE_FEATURE | (PTP_EXTTS_VALID_FLAGS + 1),
};
int err, i, j;
memset(&extts_request, 0, sizeof(extts_request));
extts_request.index = index;
for (i = 0; i < 2; i++) {
for (j = 0; j < 5; j++) {
extts_request.flags = enable_flags[j];
err = ioctl(fd, request[i], &extts_request);
show_flag_test(i, extts_request.flags, err);
extts_request.flags = 0;
err = ioctl(fd, request[i], &extts_request);
}
}
}
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
@ -96,7 +136,8 @@ static void usage(char *progname)
" -s set the ptp clock time from the system time\n"
" -S set the system time from the ptp clock time\n"
" -t val shift the ptp clock time by 'val' seconds\n"
" -T val set the ptp clock time to 'val' seconds\n",
" -T val set the ptp clock time to 'val' seconds\n"
" -z test combinations of rising/falling external time stamp flags\n",
progname);
}
@ -122,6 +163,7 @@ int main(int argc, char *argv[])
int adjtime = 0;
int capabilities = 0;
int extts = 0;
int flagtest = 0;
int gettime = 0;
int index = 0;
int list_pins = 0;
@ -138,7 +180,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:v"))) {
while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) {
switch (c) {
case 'c':
capabilities = 1;
@ -191,6 +233,9 @@ int main(int argc, char *argv[])
settime = 3;
seconds = atoi(optarg);
break;
case 'z':
flagtest = 1;
break;
case 'h':
usage(progname);
return 0;
@ -322,6 +367,10 @@ int main(int argc, char *argv[])
}
}
if (flagtest) {
do_flag_test(fd, index);
}
if (list_pins) {
int n_pins = 0;
if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {