Merge tag 'net-6.3-rc6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from wireless and can.

  Current release - regressions:

   - wifi: mac80211:
      - fix potential null pointer dereference
      - fix receiving mesh packets in forwarding=0 networks
      - fix mesh forwarding

  Current release - new code bugs:

   - virtio/vsock: fix leaks due to missing skb owner

  Previous releases - regressions:

   - raw: fix NULL deref in raw_get_next()

   - sctp: check send stream number after wait_for_sndbuf

   - qrtr:
      - fix a refcount bug in qrtr_recvmsg()
      - do not do DEL_SERVER broadcast after DEL_CLIENT

   - wifi: brcmfmac: fix SDIO suspend/resume regression

   - wifi: mt76: fix use-after-free in fw features query

   - can: fix race between isotp_sendmsg() and isotp_release()

   - eth: mtk_eth_soc: fix remaining throughput regression

   - eth: ice: reset FDIR counter in FDIR init stage

  Previous releases - always broken:

   - core: don't let netpoll invoke NAPI if in xmit context

   - icmp: guard against too small mtu

   - ipv6: fix an uninit variable access bug in __ip6_make_skb()

   - wifi: mac80211: fix the size calculation of ieee80211_ie_len_eht_cap()

   - can: fix poll() to not report false EPOLLOUT events

   - eth: gve: secure enough bytes in the first TX desc for all TCP pkts"

* tag 'net-6.3-rc6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (47 commits)
  net: stmmac: check fwnode for phy device before scanning for phy
  net: stmmac: Add queue reset into stmmac_xdp_open() function
  selftests: net: rps_default_mask.sh: delete veth link specifically
  net: fec: make use of MDIO C45 quirk
  can: isotp: fix race between isotp_sendmsg() and isotp_release()
  can: isotp: isotp_ops: fix poll() to not report false EPOLLOUT events
  can: isotp: isotp_recvmsg(): use sock_recv_cmsgs() to get SOCK_RXQ_OVFL infos
  can: j1939: j1939_tp_tx_dat_new(): fix out-of-bounds memory access
  gve: Secure enough bytes in the first TX desc for all TCP pkts
  netlink: annotate lockless accesses to nlk->max_recvmsg_len
  ethtool: reset #lanes when lanes is omitted
  ping: Fix potentail NULL deref for /proc/net/icmp.
  raw: Fix NULL deref in raw_get_next().
  ice: Reset FDIR counter in FDIR init stage
  ice: fix wrong fallback logic for FDIR
  net: stmmac: fix up RX flow hash indirection table when setting channels
  net: ethernet: ti: am65-cpsw: Fix mdio cleanup in probe
  wifi: mt76: ignore key disable commands
  wifi: ath11k: reduce the MHI timeout to 20s
  ipv6: Fix an uninit variable access bug in __ip6_make_skb()
  ...
commit f2afccfefe
@@ -5601,7 +5601,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
         * .port_set_upstream_port method.
         */
        .set_egress_port = mv88e6393x_set_egress_port,
-       .watchdog_ops = &mv88e6390_watchdog_ops,
+       .watchdog_ops = &mv88e6393x_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
@@ -943,6 +943,26 @@ const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
        .irq_free = mv88e6390_watchdog_free,
 };
 
+static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+       mv88e6390_watchdog_action(chip, irq);
+
+       /* Fix for clearing the force WD event bit.
+        * Unreleased erratum on mv88e6393x.
+        */
+       mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+                          MV88E6390_G2_WDOG_CTL_UPDATE |
+                          MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+
+       return IRQ_HANDLED;
+}
+
+const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
+       .irq_action = mv88e6393x_watchdog_action,
+       .irq_setup = mv88e6390_watchdog_setup,
+       .irq_free = mv88e6390_watchdog_free,
+};
+
 static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
 {
        struct mv88e6xxx_chip *chip = dev_id;
@@ -369,6 +369,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
 extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
 extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
 extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
 
 extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
 extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
@@ -507,6 +507,11 @@ struct bufdesc_ex {
 /* i.MX6Q adds pm_qos support */
 #define FEC_QUIRK_HAS_PMQOS            BIT(23)
 
+/* Not all FEC hardware block MDIOs support accesses in C45 mode.
+ * Older blocks in the ColdFire parts do not support it.
+ */
+#define FEC_QUIRK_HAS_MDIO_C45         BIT(24)
+
 struct bufdesc_prop {
        int qid;
        /* Address of Rx and Tx buffers */
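For context, here is a minimal stand-alone sketch (not driver code) of how a capability bit like FEC_QUIRK_HAS_MDIO_C45 is typically consumed: clause-22 accessors are always installed, clause-45 accessors only when the quirk says the MDIO block supports them, which is the pattern the fec_enet_mii_init() hunk further below applies. All types and helper names here are toy stand-ins, not kernel symbols.

#include <stdio.h>

#define QUIRK_HAS_MDIO_C45 (1u << 24)   /* mirrors BIT(24) above */

struct toy_mdio_bus {
        int (*read_c22)(int phy, int reg);
        int (*read_c45)(int phy, int devad, int reg);
};

static int toy_read_c22(int phy, int reg) { return 0x1234; }
static int toy_read_c45(int phy, int devad, int reg) { return 0x5678; }

/* Install C45 ops only when the hardware quirk allows it. */
static void toy_mii_init(struct toy_mdio_bus *bus, unsigned int quirks)
{
        bus->read_c22 = toy_read_c22;
        bus->read_c45 = NULL;
        if (quirks & QUIRK_HAS_MDIO_C45)
                bus->read_c45 = toy_read_c45;
}

int main(void)
{
        struct toy_mdio_bus bus;

        toy_mii_init(&bus, QUIRK_HAS_MDIO_C45);
        printf("c45 supported: %s\n", bus.read_c45 ? "yes" : "no");

        toy_mii_init(&bus, 0);  /* e.g. an older ColdFire FEC block */
        printf("c45 supported: %s\n", bus.read_c45 ? "yes" : "no");
        return 0;
}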
@ -100,18 +100,19 @@ struct fec_devinfo {
|
|||
|
||||
static const struct fec_devinfo fec_imx25_info = {
|
||||
.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
|
||||
FEC_QUIRK_HAS_FRREG,
|
||||
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx27_info = {
|
||||
.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
|
||||
.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
|
||||
FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx28_info = {
|
||||
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
|
||||
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
|
||||
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
|
||||
FEC_QUIRK_NO_HARD_RESET,
|
||||
FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx6q_info = {
|
||||
|
@ -119,11 +120,12 @@ static const struct fec_devinfo fec_imx6q_info = {
|
|||
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
|
||||
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
|
||||
FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
|
||||
FEC_QUIRK_HAS_PMQOS,
|
||||
FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_mvf600_info = {
|
||||
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
|
||||
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
|
||||
FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx6x_info = {
|
||||
|
@ -132,7 +134,8 @@ static const struct fec_devinfo fec_imx6x_info = {
|
|||
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
|
||||
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
|
||||
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
|
||||
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
|
||||
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
|
||||
FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx6ul_info = {
|
||||
|
@ -140,7 +143,8 @@ static const struct fec_devinfo fec_imx6ul_info = {
|
|||
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
|
||||
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
|
||||
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
|
||||
FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
|
||||
FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
|
||||
FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx8mq_info = {
|
||||
|
@ -150,7 +154,8 @@ static const struct fec_devinfo fec_imx8mq_info = {
|
|||
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
|
||||
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
|
||||
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
|
||||
FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
|
||||
FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
|
||||
FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx8qm_info = {
|
||||
|
@ -160,14 +165,15 @@ static const struct fec_devinfo fec_imx8qm_info = {
|
|||
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
|
||||
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
|
||||
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
|
||||
FEC_QUIRK_DELAYED_CLKS_SUPPORT,
|
||||
FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_s32v234_info = {
|
||||
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
|
||||
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
|
||||
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
|
||||
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
|
||||
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
|
||||
FEC_QUIRK_HAS_MDIO_C45,
|
||||
};
|
||||
|
||||
static struct platform_device_id fec_devtype[] = {
|
||||
|
@ -2434,8 +2440,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
|
|||
fep->mii_bus->name = "fec_enet_mii_bus";
|
||||
fep->mii_bus->read = fec_enet_mdio_read_c22;
|
||||
fep->mii_bus->write = fec_enet_mdio_write_c22;
|
||||
fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
|
||||
fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
|
||||
if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
|
||||
fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
|
||||
fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
|
||||
}
|
||||
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
||||
pdev->name, fep->dev_id + 1);
|
||||
fep->mii_bus->priv = fep;
|
||||
|
|
|
@@ -47,6 +47,8 @@
 
 #define GVE_RX_BUFFER_SIZE_DQO 2048
 
+#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+
 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 struct gve_rx_desc_queue {
        struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -284,8 +284,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
        int bytes;
        int hlen;
 
-       hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
-                                tcp_hdrlen(skb) : skb_headlen(skb);
+       hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
+                                min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
 
        pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
                                                   hlen);
@@ -454,13 +454,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
        pkt_desc = &tx->desc[idx];
 
        l4_hdr_offset = skb_checksum_start_offset(skb);
-       /* If the skb is gso, then we want the tcp header in the first segment
-        * otherwise we want the linear portion of the skb (which will contain
-        * the checksum because skb->csum_start and skb->csum_offset are given
-        * relative to skb->head) in the first segment.
+       /* If the skb is gso, then we want the tcp header alone in the first segment
+        * otherwise we want the minimum required by the gVNIC spec.
         */
        hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
-                       skb_headlen(skb);
+                       min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
 
        info->skb = skb;
        /* We don't want to split the header, so if necessary, pad to the end
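A small stand-alone sketch of the first-descriptor sizing rule these gve hunks implement: for GSO packets everything up to and including the TCP header must sit behind the first descriptor, otherwise at least GVE_GQ_TX_MIN_PKT_DESC_BYTES (182, per the hunk above) or the whole packet if it is shorter. Parameter names and the example lengths below are illustrative, not the driver's.

#include <stdio.h>
#include <stdbool.h>

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182        /* value from the hunk above */

static int min_int(int a, int b)
{
        return a < b ? a : b;
}

/* Bytes that must be copied behind the first TX descriptor. */
static int first_desc_bytes(bool is_gso, int l4_hdr_offset, int tcp_hdrlen,
                            int pkt_len)
{
        if (is_gso)
                return l4_hdr_offset + tcp_hdrlen;
        return min_int(GVE_GQ_TX_MIN_PKT_DESC_BYTES, pkt_len);
}

int main(void)
{
        /* 66-byte TCP ACK: shorter than 182, the whole packet fits */
        printf("ACK      -> %d bytes\n", first_desc_bytes(false, 0, 0, 66));
        /* 1514-byte non-GSO frame: only 182 bytes go in the first desc */
        printf("full MTU -> %d bytes\n", first_desc_bytes(false, 0, 0, 1514));
        /* GSO packet, 34 bytes of L2+L3 headers and a 32-byte TCP header */
        printf("GSO      -> %d bytes\n", first_desc_bytes(true, 34, 32, 64000));
        return 0;
}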
@ -541,6 +541,21 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
|
||||
* @fdir: pointer to the VF FDIR structure
|
||||
*/
|
||||
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
|
||||
{
|
||||
enum ice_fltr_ptype flow;
|
||||
|
||||
for (flow = ICE_FLTR_PTYPE_NONF_NONE;
|
||||
flow < ICE_FLTR_PTYPE_MAX; flow++) {
|
||||
fdir->fdir_fltr_cnt[flow][0] = 0;
|
||||
fdir->fdir_fltr_cnt[flow][1] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_vc_fdir_has_prof_conflict
|
||||
* @vf: pointer to the VF structure
|
||||
|
@ -1871,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
|
|||
v_ret = VIRTCHNL_STATUS_SUCCESS;
|
||||
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
|
||||
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
|
||||
goto err_free_conf;
|
||||
goto err_rem_entry;
|
||||
}
|
||||
|
||||
ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
|
||||
|
@ -1880,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
|
|||
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
|
||||
dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
|
||||
vf->vf_id, ret);
|
||||
goto err_rem_entry;
|
||||
goto err_clr_irq;
|
||||
}
|
||||
|
||||
exit:
|
||||
kfree(stat);
|
||||
return ret;
|
||||
|
||||
err_rem_entry:
|
||||
err_clr_irq:
|
||||
ice_vc_fdir_clear_irq_ctx(vf);
|
||||
err_rem_entry:
|
||||
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
|
||||
err_free_conf:
|
||||
devm_kfree(dev, conf);
|
||||
|
@ -1997,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
|
|||
spin_lock_init(&fdir->ctx_lock);
|
||||
fdir->ctx_irq.flags = 0;
|
||||
fdir->ctx_done.flags = 0;
|
||||
ice_vc_fdir_reset_cnt_all(fdir);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -753,6 +753,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
|
|||
MAC_MCR_FORCE_RX_FC);
|
||||
|
||||
/* Configure speed */
|
||||
mac->speed = speed;
|
||||
switch (speed) {
|
||||
case SPEED_2500:
|
||||
case SPEED_1000:
|
||||
|
@ -3235,6 +3236,9 @@ found:
|
|||
if (dp->index >= MTK_QDMA_NUM_QUEUES)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
if (mac->speed > 0 && mac->speed <= s.base.speed)
|
||||
s.base.speed = 0;
|
||||
|
||||
mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
|
|
|
@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
|
|||
priv->plat->mdio_bus_data->xpcs_an_inband = false;
|
||||
} else {
|
||||
priv->plat->max_speed = 1000;
|
||||
priv->plat->mdio_bus_data->xpcs_an_inband = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1134,20 +1134,26 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
|
|||
static int stmmac_init_phy(struct net_device *dev)
|
||||
{
|
||||
struct stmmac_priv *priv = netdev_priv(dev);
|
||||
struct fwnode_handle *phy_fwnode;
|
||||
struct fwnode_handle *fwnode;
|
||||
int ret;
|
||||
|
||||
if (!phylink_expects_phy(priv->phylink))
|
||||
return 0;
|
||||
|
||||
fwnode = of_fwnode_handle(priv->plat->phylink_node);
|
||||
if (!fwnode)
|
||||
fwnode = dev_fwnode(priv->device);
|
||||
|
||||
if (fwnode)
|
||||
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
|
||||
phy_fwnode = fwnode_get_phy_node(fwnode);
|
||||
else
|
||||
phy_fwnode = NULL;
|
||||
|
||||
/* Some DT bindings do not set-up the PHY handle. Let's try to
|
||||
* manually parse it
|
||||
*/
|
||||
if (!fwnode || ret) {
|
||||
if (!phy_fwnode || IS_ERR(phy_fwnode)) {
|
||||
int addr = priv->plat->phy_addr;
|
||||
struct phy_device *phydev;
|
||||
|
||||
|
@ -1163,6 +1169,9 @@ static int stmmac_init_phy(struct net_device *dev)
|
|||
}
|
||||
|
||||
ret = phylink_connect_phy(priv->phylink, phydev);
|
||||
} else {
|
||||
fwnode_handle_put(phy_fwnode);
|
||||
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
|
||||
}
|
||||
|
||||
if (!priv->plat->pmt) {
|
||||
|
@ -6622,6 +6631,8 @@ int stmmac_xdp_open(struct net_device *dev)
|
|||
goto init_error;
|
||||
}
|
||||
|
||||
stmmac_reset_queues_param(priv);
|
||||
|
||||
/* DMA CSR Channel configuration */
|
||||
for (chan = 0; chan < dma_csr_ch; chan++) {
|
||||
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
|
||||
|
@ -6948,7 +6959,7 @@ static void stmmac_napi_del(struct net_device *dev)
|
|||
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
|
||||
{
|
||||
struct stmmac_priv *priv = netdev_priv(dev);
|
||||
int ret = 0;
|
||||
int ret = 0, i;
|
||||
|
||||
if (netif_running(dev))
|
||||
stmmac_release(dev);
|
||||
|
@ -6957,6 +6968,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
|
|||
|
||||
priv->plat->rx_queues_to_use = rx_cnt;
|
||||
priv->plat->tx_queues_to_use = tx_cnt;
|
||||
if (!netif_is_rxfh_configured(dev))
|
||||
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
|
||||
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
|
||||
rx_cnt);
|
||||
|
||||
stmmac_napi_add(dev);
|
||||
|
||||
|
|
|
@ -2926,7 +2926,8 @@ err_free_phylink:
|
|||
am65_cpsw_nuss_phylink_cleanup(common);
|
||||
am65_cpts_release(common->cpts);
|
||||
err_of_clear:
|
||||
of_platform_device_destroy(common->mdio_dev, NULL);
|
||||
if (common->mdio_dev)
|
||||
of_platform_device_destroy(common->mdio_dev, NULL);
|
||||
err_pm_clear:
|
||||
pm_runtime_put_sync(dev);
|
||||
pm_runtime_disable(dev);
|
||||
|
@ -2956,7 +2957,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
|
|||
am65_cpts_release(common->cpts);
|
||||
am65_cpsw_disable_serdes_phy(common);
|
||||
|
||||
of_platform_device_destroy(common->mdio_dev, NULL);
|
||||
if (common->mdio_dev)
|
||||
of_platform_device_destroy(common->mdio_dev, NULL);
|
||||
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
|
|
@ -1586,6 +1586,25 @@ void phylink_destroy(struct phylink *pl)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(phylink_destroy);
|
||||
|
||||
/**
|
||||
* phylink_expects_phy() - Determine if phylink expects a phy to be attached
|
||||
* @pl: a pointer to a &struct phylink returned from phylink_create()
|
||||
*
|
||||
* When using fixed-link mode, or in-band mode with 1000base-X or 2500base-X,
|
||||
* no PHY is needed.
|
||||
*
|
||||
* Returns true if phylink will be expecting a PHY.
|
||||
*/
|
||||
bool phylink_expects_phy(struct phylink *pl)
|
||||
{
|
||||
if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
|
||||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
|
||||
phy_interface_mode_is_8023z(pl->link_config.interface)))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(phylink_expects_phy);
|
||||
|
||||
static void phylink_phy_change(struct phy_device *phydev, bool up)
|
||||
{
|
||||
struct phylink *pl = phydev->phylink;
|
||||
|
|
|
@ -387,6 +387,10 @@ static const struct sfp_quirk sfp_quirks[] = {
|
|||
|
||||
SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
|
||||
|
||||
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
|
||||
// 2600MBd in their EERPOM
|
||||
SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
|
||||
|
||||
// Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
|
||||
// their EEPROM
|
||||
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#include "pci.h"
|
||||
#include "pcic.h"
|
||||
|
||||
#define MHI_TIMEOUT_DEFAULT_MS 90000
|
||||
#define MHI_TIMEOUT_DEFAULT_MS 20000
|
||||
#define RDDM_DUMP_SIZE 0x420000
|
||||
|
||||
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
|
||||
|
|
|
@ -994,15 +994,34 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
|
|||
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
|
||||
|
||||
|
||||
static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
|
||||
int val)
|
||||
static void brcmf_sdiod_acpi_save_power_manageable(struct brcmf_sdio_dev *sdiodev)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
struct acpi_device *adev;
|
||||
|
||||
adev = ACPI_COMPANION(dev);
|
||||
adev = ACPI_COMPANION(&sdiodev->func1->dev);
|
||||
if (adev)
|
||||
adev->flags.power_manageable = 0;
|
||||
sdiodev->func1_power_manageable = adev->flags.power_manageable;
|
||||
|
||||
adev = ACPI_COMPANION(&sdiodev->func2->dev);
|
||||
if (adev)
|
||||
sdiodev->func2_power_manageable = adev->flags.power_manageable;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void brcmf_sdiod_acpi_set_power_manageable(struct brcmf_sdio_dev *sdiodev,
|
||||
int enable)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
struct acpi_device *adev;
|
||||
|
||||
adev = ACPI_COMPANION(&sdiodev->func1->dev);
|
||||
if (adev)
|
||||
adev->flags.power_manageable = enable ? sdiodev->func1_power_manageable : 0;
|
||||
|
||||
adev = ACPI_COMPANION(&sdiodev->func2->dev);
|
||||
if (adev)
|
||||
adev->flags.power_manageable = enable ? sdiodev->func2_power_manageable : 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -1012,7 +1031,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
|
|||
int err;
|
||||
struct brcmf_sdio_dev *sdiodev;
|
||||
struct brcmf_bus *bus_if;
|
||||
struct device *dev;
|
||||
|
||||
brcmf_dbg(SDIO, "Enter\n");
|
||||
brcmf_dbg(SDIO, "Class=%x\n", func->class);
|
||||
|
@ -1020,14 +1038,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
|
|||
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
|
||||
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
|
||||
|
||||
dev = &func->dev;
|
||||
|
||||
/* Set MMC_QUIRK_LENIENT_FN0 for this card */
|
||||
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
|
||||
|
||||
/* prohibit ACPI power management for this device */
|
||||
brcmf_sdiod_acpi_set_power_manageable(dev, 0);
|
||||
|
||||
/* Consume func num 1 but dont do anything with it. */
|
||||
if (func->num == 1)
|
||||
return 0;
|
||||
|
@ -1059,6 +1072,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
|
|||
dev_set_drvdata(&sdiodev->func1->dev, bus_if);
|
||||
sdiodev->dev = &sdiodev->func1->dev;
|
||||
|
||||
brcmf_sdiod_acpi_save_power_manageable(sdiodev);
|
||||
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
|
||||
|
||||
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
|
||||
|
@ -1124,6 +1138,8 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
|
|||
|
||||
if (sdiodev->settings->bus.sdio.oob_irq_supported ||
|
||||
pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
|
||||
/* Stop ACPI from turning off the device when wowl is enabled */
|
||||
brcmf_sdiod_acpi_set_power_manageable(sdiodev, !enabled);
|
||||
sdiodev->wowl_enabled = enabled;
|
||||
brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
|
||||
return;
|
||||
|
|
|
@ -188,6 +188,8 @@ struct brcmf_sdio_dev {
|
|||
char nvram_name[BRCMF_FW_NAME_LEN];
|
||||
char clm_name[BRCMF_FW_NAME_LEN];
|
||||
bool wowl_enabled;
|
||||
bool func1_power_manageable;
|
||||
bool func2_power_manageable;
|
||||
enum brcmf_sdiod_state state;
|
||||
struct brcmf_sdiod_freezer *freezer;
|
||||
const struct firmware *clm_fw;
|
||||
|
|
|
@ -512,15 +512,15 @@ mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (cmd == SET_KEY) {
|
||||
key->hw_key_idx = wcid->idx;
|
||||
wcid->hw_key_idx = idx;
|
||||
} else {
|
||||
if (cmd != SET_KEY) {
|
||||
if (idx == wcid->hw_key_idx)
|
||||
wcid->hw_key_idx = -1;
|
||||
|
||||
key = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
key->hw_key_idx = wcid->idx;
|
||||
wcid->hw_key_idx = idx;
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid, key);
|
||||
|
||||
return mt7603_wtbl_set_key(dev, wcid->idx, key);
|
||||
|
|
|
@ -1193,8 +1193,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
|
|||
static int
|
||||
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum mt76_cipher_type cipher, u16 cipher_mask,
|
||||
enum set_key_cmd cmd)
|
||||
enum mt76_cipher_type cipher, u16 cipher_mask)
|
||||
{
|
||||
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
|
||||
u8 data[32] = {};
|
||||
|
@ -1203,27 +1202,18 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
|||
return -EINVAL;
|
||||
|
||||
mt76_rr_copy(dev, addr, data, sizeof(data));
|
||||
if (cmd == SET_KEY) {
|
||||
if (cipher == MT_CIPHER_TKIP) {
|
||||
/* Rx/Tx MIC keys are swapped */
|
||||
memcpy(data, key->key, 16);
|
||||
memcpy(data + 16, key->key + 24, 8);
|
||||
memcpy(data + 24, key->key + 16, 8);
|
||||
} else {
|
||||
if (cipher_mask == BIT(cipher))
|
||||
memcpy(data, key->key, key->keylen);
|
||||
else if (cipher != MT_CIPHER_BIP_CMAC_128)
|
||||
memcpy(data, key->key, 16);
|
||||
if (cipher == MT_CIPHER_BIP_CMAC_128)
|
||||
memcpy(data + 16, key->key, 16);
|
||||
}
|
||||
if (cipher == MT_CIPHER_TKIP) {
|
||||
/* Rx/Tx MIC keys are swapped */
|
||||
memcpy(data, key->key, 16);
|
||||
memcpy(data + 16, key->key + 24, 8);
|
||||
memcpy(data + 24, key->key + 16, 8);
|
||||
} else {
|
||||
if (cipher_mask == BIT(cipher))
|
||||
memcpy(data, key->key, key->keylen);
|
||||
else if (cipher != MT_CIPHER_BIP_CMAC_128)
|
||||
memcpy(data, key->key, 16);
|
||||
if (cipher == MT_CIPHER_BIP_CMAC_128)
|
||||
memset(data + 16, 0, 16);
|
||||
else if (cipher_mask)
|
||||
memset(data, 0, 16);
|
||||
if (!cipher_mask)
|
||||
memset(data, 0, sizeof(data));
|
||||
memcpy(data + 16, key->key, 16);
|
||||
}
|
||||
|
||||
mt76_wr_copy(dev, addr, data, sizeof(data));
|
||||
|
@ -1234,7 +1224,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
|||
static int
|
||||
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
||||
enum mt76_cipher_type cipher, u16 cipher_mask,
|
||||
int keyidx, enum set_key_cmd cmd)
|
||||
int keyidx)
|
||||
{
|
||||
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
|
||||
|
||||
|
@ -1253,9 +1243,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
|||
else
|
||||
w0 &= ~MT_WTBL_W0_RX_IK_VALID;
|
||||
|
||||
if (cmd == SET_KEY &&
|
||||
(cipher != MT_CIPHER_BIP_CMAC_128 ||
|
||||
cipher_mask == BIT(cipher))) {
|
||||
if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
|
||||
w0 &= ~MT_WTBL_W0_KEY_IDX;
|
||||
w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
|
||||
}
|
||||
|
@ -1272,19 +1260,10 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
|||
|
||||
static void
|
||||
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
||||
enum mt76_cipher_type cipher, u16 cipher_mask,
|
||||
enum set_key_cmd cmd)
|
||||
enum mt76_cipher_type cipher, u16 cipher_mask)
|
||||
{
|
||||
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
|
||||
|
||||
if (!cipher_mask) {
|
||||
mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
|
||||
return;
|
||||
}
|
||||
|
||||
if (cmd != SET_KEY)
|
||||
return;
|
||||
|
||||
if (cipher == MT_CIPHER_BIP_CMAC_128 &&
|
||||
cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
|
||||
return;
|
||||
|
@ -1295,8 +1274,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
|||
|
||||
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
|
||||
struct mt76_wcid *wcid,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd)
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
enum mt76_cipher_type cipher;
|
||||
u16 cipher_mask = wcid->cipher;
|
||||
|
@ -1306,19 +1284,14 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
|
|||
if (cipher == MT_CIPHER_NONE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (cmd == SET_KEY)
|
||||
cipher_mask |= BIT(cipher);
|
||||
else
|
||||
cipher_mask &= ~BIT(cipher);
|
||||
|
||||
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
|
||||
err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
|
||||
cmd);
|
||||
cipher_mask |= BIT(cipher);
|
||||
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
|
||||
err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
|
||||
key->keyidx, cmd);
|
||||
key->keyidx);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
|
@ -1329,13 +1302,12 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
|
|||
|
||||
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
|
||||
struct mt76_wcid *wcid,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd)
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
int err;
|
||||
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
|
||||
err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
|
||||
return err;
|
||||
|
|
|
@ -391,18 +391,17 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
|
||||
if (cmd == SET_KEY)
|
||||
*wcid_keyidx = idx;
|
||||
else if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
else
|
||||
else {
|
||||
if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid,
|
||||
cmd == SET_KEY ? key : NULL);
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid, key);
|
||||
if (mt76_is_mmio(&dev->mt76))
|
||||
err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
|
||||
err = mt7615_mac_wtbl_set_key(dev, wcid, key);
|
||||
else
|
||||
err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
|
||||
err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
|
||||
|
||||
out:
|
||||
mt7615_mutex_release(dev);
|
||||
|
|
|
@ -490,11 +490,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
|
|||
void mt7615_mac_set_timing(struct mt7615_phy *phy);
|
||||
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
|
||||
struct mt76_wcid *wcid,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd);
|
||||
struct ieee80211_key_conf *key);
|
||||
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd);
|
||||
struct ieee80211_key_conf *key);
|
||||
void mt7615_mac_reset_work(struct work_struct *work);
|
||||
u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
|
||||
|
||||
|
|
|
@ -454,20 +454,20 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
|
||||
wcid = msta ? &msta->wcid : &mvif->group_wcid;
|
||||
|
||||
if (cmd == SET_KEY) {
|
||||
key->hw_key_idx = wcid->idx;
|
||||
wcid->hw_key_idx = idx;
|
||||
if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
|
||||
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
|
||||
wcid->sw_iv = true;
|
||||
}
|
||||
} else {
|
||||
if (cmd != SET_KEY) {
|
||||
if (idx == wcid->hw_key_idx) {
|
||||
wcid->hw_key_idx = -1;
|
||||
wcid->sw_iv = false;
|
||||
}
|
||||
|
||||
key = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
key->hw_key_idx = wcid->idx;
|
||||
wcid->hw_key_idx = idx;
|
||||
if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
|
||||
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
|
||||
wcid->sw_iv = true;
|
||||
}
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid, key);
|
||||
|
||||
|
|
|
@ -410,16 +410,15 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
mt7915_mcu_add_bss_info(phy, vif, true);
|
||||
}
|
||||
|
||||
if (cmd == SET_KEY)
|
||||
if (cmd == SET_KEY) {
|
||||
*wcid_keyidx = idx;
|
||||
else if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
else
|
||||
} else {
|
||||
if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid,
|
||||
cmd == SET_KEY ? key : NULL);
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid, key);
|
||||
err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
|
||||
key, MCU_EXT_CMD(STA_REC_UPDATE),
|
||||
&msta->wcid, cmd);
|
||||
|
|
|
@ -171,12 +171,12 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
|
|||
|
||||
u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
|
||||
{
|
||||
struct mt7921_fw_features *features = NULL;
|
||||
const struct mt76_connac2_fw_trailer *hdr;
|
||||
struct mt7921_realease_info *rel_info;
|
||||
const struct firmware *fw;
|
||||
int ret, i, offset = 0;
|
||||
const u8 *data, *end;
|
||||
u8 offload_caps = 0;
|
||||
|
||||
ret = request_firmware(&fw, fw_wm, dev);
|
||||
if (ret)
|
||||
|
@ -208,7 +208,10 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
|
|||
data += sizeof(*rel_info);
|
||||
|
||||
if (rel_info->tag == MT7921_FW_TAG_FEATURE) {
|
||||
struct mt7921_fw_features *features;
|
||||
|
||||
features = (struct mt7921_fw_features *)data;
|
||||
offload_caps = features->data;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -218,7 +221,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
|
|||
out:
|
||||
release_firmware(fw);
|
||||
|
||||
return features ? features->data : 0;
|
||||
return offload_caps;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt7921_check_offload_capability);
|
||||
|
||||
|
|
|
@ -569,16 +569,15 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
|
||||
mt7921_mutex_acquire(dev);
|
||||
|
||||
if (cmd == SET_KEY)
|
||||
if (cmd == SET_KEY) {
|
||||
*wcid_keyidx = idx;
|
||||
else if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
else
|
||||
} else {
|
||||
if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid,
|
||||
cmd == SET_KEY ? key : NULL);
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid, key);
|
||||
err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
|
||||
key, MCU_UNI_CMD(STA_REC_UPDATE),
|
||||
&msta->wcid, cmd);
|
||||
|
|
|
@ -20,7 +20,7 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
|
|||
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
|
||||
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
|
||||
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
|
||||
.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
|
||||
{ },
|
||||
};
|
||||
|
||||
|
|
|
@ -351,16 +351,15 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
mt7996_mcu_add_bss_info(phy, vif, true);
|
||||
}
|
||||
|
||||
if (cmd == SET_KEY)
|
||||
if (cmd == SET_KEY) {
|
||||
*wcid_keyidx = idx;
|
||||
else if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
else
|
||||
} else {
|
||||
if (idx == *wcid_keyidx)
|
||||
*wcid_keyidx = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid,
|
||||
cmd == SET_KEY ? key : NULL);
|
||||
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid, key);
|
||||
err = mt7996_mcu_add_key(&dev->mt76, vif, &msta->bip,
|
||||
key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
|
||||
&msta->wcid, cmd);
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
ccflags-y += -Werror
|
||||
|
||||
obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
|
||||
mtk_t7xx-y:= t7xx_pci.o \
|
||||
t7xx_pcie_mac.o \
|
||||
|
|
|
@ -574,6 +574,7 @@ struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
|
|||
phy_interface_t iface,
|
||||
const struct phylink_mac_ops *mac_ops);
|
||||
void phylink_destroy(struct phylink *);
|
||||
bool phylink_expects_phy(struct phylink *pl);
|
||||
|
||||
int phylink_connect_phy(struct phylink *, struct phy_device *);
|
||||
int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
|
||||
|
|
|
@ -37,7 +37,7 @@ int raw_rcv(struct sock *, struct sk_buff *);
|
|||
struct raw_hashinfo {
|
||||
spinlock_t lock;
|
||||
|
||||
struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
|
||||
struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
|
||||
};
|
||||
|
||||
static inline u32 raw_hashfunc(const struct net *net, u32 proto)
|
||||
|
@ -51,7 +51,7 @@ static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
|
|||
|
||||
spin_lock_init(&hashinfo->lock);
|
||||
for (i = 0; i < RAW_HTABLE_SIZE; i++)
|
||||
INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
|
||||
INIT_HLIST_HEAD(&hashinfo->ht[i]);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
|
|
@ -119,7 +119,8 @@ enum {
|
|||
ISOTP_WAIT_FIRST_FC,
|
||||
ISOTP_WAIT_FC,
|
||||
ISOTP_WAIT_DATA,
|
||||
ISOTP_SENDING
|
||||
ISOTP_SENDING,
|
||||
ISOTP_SHUTDOWN,
|
||||
};
|
||||
|
||||
struct tpcon {
|
||||
|
@ -880,8 +881,8 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
|
|||
txtimer);
|
||||
struct sock *sk = &so->sk;
|
||||
|
||||
/* don't handle timeouts in IDLE state */
|
||||
if (so->tx.state == ISOTP_IDLE)
|
||||
/* don't handle timeouts in IDLE or SHUTDOWN state */
|
||||
if (so->tx.state == ISOTP_IDLE || so->tx.state == ISOTP_SHUTDOWN)
|
||||
return HRTIMER_NORESTART;
|
||||
|
||||
/* we did not get any flow control or echo frame in time */
|
||||
|
@ -918,7 +919,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
|||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct isotp_sock *so = isotp_sk(sk);
|
||||
u32 old_state = so->tx.state;
|
||||
struct sk_buff *skb;
|
||||
struct net_device *dev;
|
||||
struct canfd_frame *cf;
|
||||
|
@ -928,23 +928,24 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
|||
int off;
|
||||
int err;
|
||||
|
||||
if (!so->bound)
|
||||
if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
wait_free_buffer:
|
||||
/* we do not support multiple buffers - for now */
|
||||
if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
|
||||
wq_has_sleeper(&so->wait)) {
|
||||
if (msg->msg_flags & MSG_DONTWAIT) {
|
||||
err = -EAGAIN;
|
||||
goto err_out;
|
||||
}
|
||||
if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
|
||||
return -EAGAIN;
|
||||
|
||||
/* wait for complete transmission of current pdu */
|
||||
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
if (err)
|
||||
goto err_out;
|
||||
/* wait for complete transmission of current pdu */
|
||||
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
if (err)
|
||||
goto err_event_drop;
|
||||
|
||||
so->tx.state = ISOTP_SENDING;
|
||||
if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
|
||||
if (so->tx.state == ISOTP_SHUTDOWN)
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
goto wait_free_buffer;
|
||||
}
|
||||
|
||||
if (!size || size > MAX_MSG_LENGTH) {
|
||||
|
@ -1074,7 +1075,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
|||
|
||||
if (wait_tx_done) {
|
||||
/* wait for complete transmission of current pdu */
|
||||
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
if (err)
|
||||
goto err_event_drop;
|
||||
|
||||
if (sk->sk_err)
|
||||
return -sk->sk_err;
|
||||
|
@ -1082,13 +1085,15 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
|||
|
||||
return size;
|
||||
|
||||
err_event_drop:
|
||||
/* got signal: force tx state machine to be idle */
|
||||
so->tx.state = ISOTP_IDLE;
|
||||
hrtimer_cancel(&so->txfrtimer);
|
||||
hrtimer_cancel(&so->txtimer);
|
||||
err_out_drop:
|
||||
/* drop this PDU and unlock a potential wait queue */
|
||||
old_state = ISOTP_IDLE;
|
||||
err_out:
|
||||
so->tx.state = old_state;
|
||||
if (so->tx.state == ISOTP_IDLE)
|
||||
wake_up_interruptible(&so->wait);
|
||||
so->tx.state = ISOTP_IDLE;
|
||||
wake_up_interruptible(&so->wait);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -1120,7 +1125,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
|||
if (ret < 0)
|
||||
goto out_err;
|
||||
|
||||
sock_recv_timestamp(msg, sk, skb);
|
||||
sock_recv_cmsgs(msg, sk, skb);
|
||||
|
||||
if (msg->msg_name) {
|
||||
__sockaddr_check_size(ISOTP_MIN_NAMELEN);
|
||||
|
@ -1150,10 +1155,12 @@ static int isotp_release(struct socket *sock)
|
|||
net = sock_net(sk);
|
||||
|
||||
/* wait for complete transmission of current pdu */
|
||||
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
while (wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE) == 0 &&
|
||||
cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SHUTDOWN) != ISOTP_IDLE)
|
||||
;
|
||||
|
||||
/* force state machines to be idle also when a signal occurred */
|
||||
so->tx.state = ISOTP_IDLE;
|
||||
so->tx.state = ISOTP_SHUTDOWN;
|
||||
so->rx.state = ISOTP_IDLE;
|
||||
|
||||
spin_lock(&isotp_notifier_lock);
|
||||
|
@@ -1608,6 +1615,21 @@ static int isotp_init(struct sock *sk)
        return 0;
 }
 
+static __poll_t isotp_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+       struct sock *sk = sock->sk;
+       struct isotp_sock *so = isotp_sk(sk);
+
+       __poll_t mask = datagram_poll(file, sock, wait);
+       poll_wait(file, &so->wait, wait);
+
+       /* Check for false positives due to TX state */
+       if ((mask & EPOLLWRNORM) && (so->tx.state != ISOTP_IDLE))
+               mask &= ~(EPOLLOUT | EPOLLWRNORM);
+
+       return mask;
+}
+
 static int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
                                  unsigned long arg)
 {
@@ -1623,7 +1645,7 @@ static const struct proto_ops isotp_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = isotp_getname,
-       .poll = datagram_poll,
+       .poll = isotp_poll,
        .ioctl = isotp_sock_no_ioctlcmd,
        .gettstamp = sock_gettstamp,
        .listen = sock_no_listen,
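From user space, the new .poll handler means poll()/epoll on a CAN_ISOTP socket only reports writability once the previous PDU has really left the tx state machine. A hedged usage sketch follows; "can0", the addressing IDs and the payload are example values and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

int main(void)
{
        struct sockaddr_can addr = { 0 };
        struct pollfd pfd;
        int s;

        s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
        if (s < 0)
                return 1;

        addr.can_family = AF_CAN;
        addr.can_ifindex = if_nametoindex("can0");      /* example interface */
        addr.can_addr.tp.tx_id = 0x712;                 /* example IDs */
        addr.can_addr.tp.rx_id = 0x71a;
        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return 1;

        pfd.fd = s;
        pfd.events = POLLOUT;

        /* With the fixed isotp_poll(), POLLOUT is no longer reported while a
         * previous PDU is still being transmitted, so waiting here avoids a
         * spurious wake-up followed by a failing write().
         */
        if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLOUT)) {
                static const unsigned char pdu[] = { 0x3e, 0x00 };

                write(s, pdu, sizeof(pdu));
        }

        close(s);
        return 0;
}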
@ -604,7 +604,10 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
|
|||
/* reserve CAN header */
|
||||
skb_reserve(skb, offsetof(struct can_frame, data));
|
||||
|
||||
memcpy(skb->cb, re_skcb, sizeof(skb->cb));
|
||||
/* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */
|
||||
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
|
||||
|
||||
memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
|
||||
skcb = j1939_skb_to_cb(skb);
|
||||
if (swap_src_dst)
|
||||
j1939_skbcb_swap(skcb);
|
||||
|
|
|
@@ -137,6 +137,20 @@ static void queue_process(struct work_struct *work)
        }
 }
 
+static int netif_local_xmit_active(struct net_device *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+               if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
+                       return 1;
+       }
+
+       return 0;
+}
+
 static void poll_one_napi(struct napi_struct *napi)
 {
        int work;
@@ -183,7 +197,10 @@ void netpoll_poll_dev(struct net_device *dev)
        if (!ni || down_trylock(&ni->dev_lock))
                return;
 
-       if (!netif_running(dev)) {
+       /* Some drivers will take the same locks in poll and xmit,
+        * we can't poll if local CPU is already in xmit.
+        */
+       if (!netif_running(dev) || netif_local_xmit_active(dev)) {
                up(&ni->dev_lock);
                return;
        }
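The guard added above keys off each TX queue's xmit_lock_owner, which records the CPU holding that queue's transmit lock. A minimal stand-alone sketch of the same "skip the work if this CPU already owns a queue lock" pattern, with toy types standing in for struct netdev_queue and netif_local_xmit_active():

#include <stdio.h>

#define NUM_TXQ 4

struct toy_txq {
        int xmit_lock_owner;    /* owning CPU id, or -1 when unlocked */
};

/* Return 1 if this_cpu already holds any queue's xmit lock. */
static int local_xmit_active(const struct toy_txq *txq, int n, int this_cpu)
{
        int i;

        for (i = 0; i < n; i++)
                if (txq[i].xmit_lock_owner == this_cpu)
                        return 1;
        return 0;
}

int main(void)
{
        struct toy_txq txq[NUM_TXQ] = {
                { -1 }, { 2 }, { -1 }, { -1 },  /* CPU 2 is mid-transmit on queue 1 */
        };

        /* CPU 2 polling now would re-enter the driver under its own xmit
         * lock, so it backs off; CPU 0 is free to poll.
         */
        printf("cpu2 may poll: %s\n", local_xmit_active(txq, NUM_TXQ, 2) ? "no" : "yes");
        printf("cpu0 may poll: %s\n", local_xmit_active(txq, NUM_TXQ, 0) ? "no" : "yes");
        return 0;
}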
@ -270,11 +270,12 @@ static int ethnl_update_linkmodes(struct genl_info *info, struct nlattr **tb,
|
|||
"lanes configuration not supported by device");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
} else if (!lsettings->autoneg) {
|
||||
/* If autoneg is off and lanes parameter is not passed from user,
|
||||
* set the lanes parameter to 0.
|
||||
} else if (!lsettings->autoneg && ksettings->lanes) {
|
||||
/* If autoneg is off and lanes parameter is not passed from user but
|
||||
* it was defined previously then set the lanes parameter to 0.
|
||||
*/
|
||||
ksettings->lanes = 0;
|
||||
*mod = true;
|
||||
}
|
||||
|
||||
ret = ethnl_update_bitset(ksettings->link_modes.advertising,
|
||||
|
|
|
@@ -749,6 +749,11 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
                room = 576;
        room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
        room -= sizeof(struct icmphdr);
+       /* Guard against tiny mtu. We need to include at least one
+        * IP network header for this message to make any sense.
+        */
+       if (room <= (int)sizeof(struct iphdr))
+               goto ende;
 
        icmp_param.data_len = skb_in->len - icmp_param.offset;
        if (icmp_param.data_len > room)
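A quick stand-alone replay of the arithmetic behind the new guard: with a pathologically small route MTU and a full set of recorded IP options, the room left after the outer IPv4 header and the ICMP header can drop to or below the size of a bare IPv4 header, and the error is now skipped instead of building a nonsensical message. The 40-byte option length is just an example worst case.

#include <stdio.h>
#include <linux/ip.h>
#include <linux/icmp.h>

/* Room left in the ICMP error for quoting the offending packet. */
static int icmp_room(int mtu, int optlen)
{
        int room = mtu;

        room -= (int)sizeof(struct iphdr) + optlen;     /* outer IPv4 header + options */
        room -= (int)sizeof(struct icmphdr);            /* ICMP header */
        return room;
}

int main(void)
{
        int mtus[] = { 68, 576, 1500 };
        int i;

        for (i = 0; i < 3; i++) {
                int room = icmp_room(mtus[i], 40);

                printf("mtu %4d -> room %4d -> %s\n", mtus[i], room,
                       room <= (int)sizeof(struct iphdr) ?
                       "too small, error not sent" : "ok");
        }
        return 0;
}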
@ -1089,13 +1089,13 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
|
|||
}
|
||||
|
||||
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
|
||||
__acquires(RCU)
|
||||
__acquires(ping_table.lock)
|
||||
{
|
||||
struct ping_iter_state *state = seq->private;
|
||||
state->bucket = 0;
|
||||
state->family = family;
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock(&ping_table.lock);
|
||||
|
||||
return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
|
||||
}
|
||||
|
@ -1121,9 +1121,9 @@ void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|||
EXPORT_SYMBOL_GPL(ping_seq_next);
|
||||
|
||||
void ping_seq_stop(struct seq_file *seq, void *v)
|
||||
__releases(RCU)
|
||||
__releases(ping_table.lock)
|
||||
{
|
||||
rcu_read_unlock();
|
||||
spin_unlock(&ping_table.lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ping_seq_stop);
|
||||
|
||||
|
|
|
@ -91,12 +91,12 @@ EXPORT_SYMBOL_GPL(raw_v4_hashinfo);
|
|||
int raw_hash_sk(struct sock *sk)
|
||||
{
|
||||
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_head *hlist;
|
||||
|
||||
hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)];
|
||||
|
||||
spin_lock(&h->lock);
|
||||
__sk_nulls_add_node_rcu(sk, hlist);
|
||||
sk_add_node_rcu(sk, hlist);
|
||||
sock_set_flag(sk, SOCK_RCU_FREE);
|
||||
spin_unlock(&h->lock);
|
||||
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
|
||||
|
@ -110,7 +110,7 @@ void raw_unhash_sk(struct sock *sk)
|
|||
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
|
||||
|
||||
spin_lock(&h->lock);
|
||||
if (__sk_nulls_del_node_init_rcu(sk))
|
||||
if (sk_del_node_init_rcu(sk))
|
||||
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
|
||||
spin_unlock(&h->lock);
|
||||
}
|
||||
|
@ -163,16 +163,15 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
|
|||
static int raw_v4_input(struct net *net, struct sk_buff *skb,
|
||||
const struct iphdr *iph, int hash)
|
||||
{
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
int sdif = inet_sdif(skb);
|
||||
struct hlist_head *hlist;
|
||||
int dif = inet_iif(skb);
|
||||
int delivered = 0;
|
||||
struct sock *sk;
|
||||
|
||||
hlist = &raw_v4_hashinfo.ht[hash];
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
if (!raw_v4_match(net, sk, iph->protocol,
|
||||
iph->saddr, iph->daddr, dif, sdif))
|
||||
continue;
|
||||
|
@ -264,10 +263,9 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
|
|||
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
|
||||
{
|
||||
struct net *net = dev_net(skb->dev);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
int dif = skb->dev->ifindex;
|
||||
int sdif = inet_sdif(skb);
|
||||
struct hlist_head *hlist;
|
||||
const struct iphdr *iph;
|
||||
struct sock *sk;
|
||||
int hash;
|
||||
|
@ -276,7 +274,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
|
|||
hlist = &raw_v4_hashinfo.ht[hash];
|
||||
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
iph = (const struct iphdr *)skb->data;
|
||||
if (!raw_v4_match(net, sk, iph->protocol,
|
||||
iph->daddr, iph->saddr, dif, sdif))
|
||||
|
@ -950,14 +948,13 @@ static struct sock *raw_get_first(struct seq_file *seq, int bucket)
|
|||
{
|
||||
struct raw_hashinfo *h = pde_data(file_inode(seq->file));
|
||||
struct raw_iter_state *state = raw_seq_private(seq);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
|
||||
for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
|
||||
++state->bucket) {
|
||||
hlist = &h->ht[state->bucket];
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each(sk, hlist) {
|
||||
if (sock_net(sk) == seq_file_net(seq))
|
||||
return sk;
|
||||
}
|
||||
|
@ -970,7 +967,7 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
|
|||
struct raw_iter_state *state = raw_seq_private(seq);
|
||||
|
||||
do {
|
||||
sk = sk_nulls_next(sk);
|
||||
sk = sk_next(sk);
|
||||
} while (sk && sock_net(sk) != seq_file_net(seq));
|
||||
|
||||
if (!sk)
|
||||
|
@ -989,9 +986,12 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
|
|||
}
|
||||
|
||||
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
__acquires(RCU)
|
||||
__acquires(&h->lock)
|
||||
{
|
||||
rcu_read_lock();
|
||||
struct raw_hashinfo *h = pde_data(file_inode(seq->file));
|
||||
|
||||
spin_lock(&h->lock);
|
||||
|
||||
return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(raw_seq_start);
|
||||
|
@ -1010,9 +1010,11 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|||
EXPORT_SYMBOL_GPL(raw_seq_next);
|
||||
|
||||
void raw_seq_stop(struct seq_file *seq, void *v)
|
||||
__releases(RCU)
|
||||
__releases(&h->lock)
|
||||
{
|
||||
rcu_read_unlock();
|
||||
struct raw_hashinfo *h = pde_data(file_inode(seq->file));
|
||||
|
||||
spin_unlock(&h->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(raw_seq_stop);
|
||||
|
||||
|
|
|
@ -57,8 +57,7 @@ static bool raw_lookup(struct net *net, struct sock *sk,
|
|||
static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
|
||||
{
|
||||
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
int slot;
|
||||
|
||||
|
@ -68,7 +67,7 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2
|
|||
rcu_read_lock();
|
||||
for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
|
||||
hlist = &hashinfo->ht[slot];
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
if (raw_lookup(net, sk, r)) {
|
||||
/*
|
||||
* Grab it and keep until we fill
|
||||
|
@ -142,9 +141,8 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
|||
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
|
||||
struct net *net = sock_net(skb->sk);
|
||||
struct inet_diag_dump_data *cb_data;
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
int num, s_num, slot, s_slot;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk = NULL;
|
||||
struct nlattr *bc;
|
||||
|
||||
|
@ -161,7 +159,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
|||
num = 0;
|
||||
|
||||
hlist = &hashinfo->ht[slot];
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
struct inet_sock *inet = inet_sk(sk);
|
||||
|
||||
if (!net_eq(sock_net(sk), net))
|
||||
|
|
|
@ -1965,8 +1965,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
|
|||
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
|
||||
if (proto == IPPROTO_ICMPV6) {
|
||||
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
|
||||
u8 icmp6_type;
|
||||
|
||||
ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
|
||||
if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
|
||||
icmp6_type = fl6->fl6_icmp_type;
|
||||
else
|
||||
icmp6_type = icmp6_hdr(skb)->icmp6_type;
|
||||
ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
|
||||
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
|
||||
}
|
||||
|
||||
|
|
|
@ -141,10 +141,9 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
|
|||
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
|
||||
{
|
||||
struct net *net = dev_net(skb->dev);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
const struct in6_addr *saddr;
|
||||
const struct in6_addr *daddr;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
bool delivered = false;
|
||||
__u8 hash;
|
||||
|
@ -155,7 +154,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
|
|||
hash = raw_hashfunc(net, nexthdr);
|
||||
hlist = &raw_v6_hashinfo.ht[hash];
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
int filtered;
|
||||
|
||||
if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
|
||||
|
@ -333,15 +332,14 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
|
|||
u8 type, u8 code, int inner_offset, __be32 info)
|
||||
{
|
||||
struct net *net = dev_net(skb->dev);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
int hash;
|
||||
|
||||
hash = raw_hashfunc(net, nexthdr);
|
||||
hlist = &raw_v6_hashinfo.ht[hash];
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
/* Note: ipv6_hdr(skb) != skb->data */
|
||||
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
|
||||
|
||||
|
|
|
@ -677,8 +677,8 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
|
|||
MODULE_DESCRIPTION("L2TP over IP");
|
||||
MODULE_VERSION("1.0");
|
||||
|
||||
/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
|
||||
* enums
|
||||
/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
|
||||
* because __stringify doesn't like enums
|
||||
*/
|
||||
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
|
||||
MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
|
||||
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
|
||||
MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
|
||||
|
|
|
@ -806,8 +806,8 @@ MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
|
|||
MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
|
||||
MODULE_VERSION("1.0");
|
||||
|
||||
/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
|
||||
* enums
|
||||
/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
|
||||
* because __stringify doesn't like enums
|
||||
*/
|
||||
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
|
||||
MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);
|
||||
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 115, 2);
|
||||
MODULE_ALIAS_NET_PF_PROTO(PF_INET6, 115);
|
||||
|
|
|
@@ -2769,14 +2769,6 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
        if (sdata->crypto_tx_tailroom_needed_cnt)
                tailroom = IEEE80211_ENCRYPT_TAILROOM;
 
-       if (!--mesh_hdr->ttl) {
-               if (multicast)
-                       goto rx_accept;
-
-               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
-               return RX_DROP_MONITOR;
-       }
-
        if (mesh_hdr->flags & MESH_FLAGS_AE) {
                struct mesh_path *mppath;
                char *proxied_addr;

@@ -2807,6 +2799,14 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
        if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
                goto rx_accept;
 
+       if (!--mesh_hdr->ttl) {
+               if (multicast)
+                       goto rx_accept;
+
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+               return RX_DROP_MONITOR;
+       }
+
        if (!ifmsh->mshcfg.dot11MeshForwarding) {
                if (is_multicast_ether_addr(eth->h_dest))
                        goto rx_accept;

@@ -2833,6 +2833,9 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
 
                if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
                        return RX_DROP_UNUSABLE;
+
+               if (skb_linearize(fwd_skb))
+                       return RX_DROP_UNUSABLE;
        }
 
        fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));

@@ -2847,7 +2850,7 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
                hdrlen += ETH_ALEN;
        else
                fwd_skb->protocol = htons(fwd_skb->len - hdrlen);
-       skb_set_network_header(fwd_skb, hdrlen);
+       skb_set_network_header(fwd_skb, hdrlen + 2);
 
        info = IEEE80211_SKB_CB(fwd_skb);
        memset(info, 0, sizeof(*info));

@@ -2896,7 +2899,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        __le16 fc = hdr->frame_control;
        struct sk_buff_head frame_list;
-       static ieee80211_rx_result res;
+       ieee80211_rx_result res;
        struct ethhdr ethhdr;
        const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;

@@ -2930,7 +2933,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
                                          data_offset, true))
                return RX_DROP_UNUSABLE;
 
-       if (rx->sta && rx->sta->amsdu_mesh_control < 0) {
+       if (rx->sta->amsdu_mesh_control < 0) {
                bool valid_std = ieee80211_is_valid_amsdu(skb, true);
                bool valid_nonstd = ieee80211_is_valid_amsdu(skb, false);

@@ -3006,7 +3009,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
                }
        }
 
-       if (is_multicast_ether_addr(hdr->addr1))
+       if (is_multicast_ether_addr(hdr->addr1) || !rx->sta)
                return RX_DROP_UNUSABLE;
 
        if (rx->key) {

@@ -3037,7 +3040,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
        struct net_device *dev = sdata->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        __le16 fc = hdr->frame_control;
-       static ieee80211_rx_result res;
+       ieee80211_rx_result res;
        bool port_control;
        int err;
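
Two of the hunks just above do nothing except drop a stray "static" from a local result variable. That one keyword matters because a static local is a single object shared by every invocation, so concurrent RX paths running the same function would race on it. A reduced userspace illustration of the shared-storage effect follows; the aliasing shown is simply the easiest way to make the sharing visible, and the functions are invented for the demo:

/* A function-local "static" result is one shared slot for every caller.
 * Reduced demo of the effect; these functions are invented, not mac80211.
 */
#include <stdio.h>

static int *square_broken(int x)
{
        static int res;         /* shared across all calls: the bug */

        res = x * x;
        return &res;
}

static void square_ok(int x, int *res)
{
        *res = x * x;           /* caller-owned storage, nothing shared */
}

int main(void)
{
        int *a = square_broken(2);
        int *b = square_broken(3);
        int c, d;

        square_ok(2, &c);
        square_ok(3, &d);

        /* a and b alias the same object, so the first result is gone:
         * prints "broken: 9 9   ok: 4 9". */
        printf("broken: %d %d   ok: %d %d\n", *a, *b, c, d);
        return 0;
}
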
@@ -1264,7 +1264,8 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
        list_del_rcu(&sta->list);
        sta->removed = true;
 
-       drv_sta_pre_rcu_remove(local, sta->sdata, sta);
+       if (sta->uploaded)
+               drv_sta_pre_rcu_remove(local, sta->sdata, sta);
 
        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
            rcu_access_pointer(sdata->u.vlan.sta) == sta)
@@ -4906,7 +4906,7 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
                                       &eht_cap->eht_cap_elem,
                                       is_ap);
        return 2 + 1 +
-              sizeof(he_cap->he_cap_elem) + n +
+              sizeof(eht_cap->eht_cap_elem) + n +
               ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
                                      eht_cap->eht_cap_elem.phy_cap_info);
        return 0;
@@ -1952,7 +1952,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
-       size_t copied;
+       size_t copied, max_recvmsg_len;
        struct sk_buff *skb, *data_skb;
        int err, ret;

@@ -1985,9 +1985,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 #endif
 
        /* Record the max length of recvmsg() calls for future allocations */
-       nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
-       nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-                                    SKB_WITH_OVERHEAD(32768));
+       max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
+       max_recvmsg_len = min_t(size_t, max_recvmsg_len,
+                               SKB_WITH_OVERHEAD(32768));
+       WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
 
        copied = data_skb->len;
        if (len < copied) {

@@ -2236,6 +2237,7 @@ static int netlink_dump(struct sock *sk)
        struct netlink_ext_ack extack = {};
        struct netlink_callback *cb;
        struct sk_buff *skb = NULL;
+       size_t max_recvmsg_len;
        struct module *module;
        int err = -ENOBUFS;
        int alloc_min_size;

@@ -2258,8 +2260,9 @@ static int netlink_dump(struct sock *sk)
        cb = &nlk->cb;
        alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
 
-       if (alloc_min_size < nlk->max_recvmsg_len) {
-               alloc_size = nlk->max_recvmsg_len;
+       max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
+       if (alloc_min_size < max_recvmsg_len) {
+               alloc_size = max_recvmsg_len;
                skb = alloc_skb(alloc_size,
                                (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
                                __GFP_NOWARN | __GFP_NORETRY);
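
The af_netlink hunks do not change the sizing logic; they route every unlocked access to nlk->max_recvmsg_len through READ_ONCE()/WRITE_ONCE() and a local variable, so the field is read once, clamped locally, and published with a single store. The same shape in a standalone sketch; the two macros below are simplified stand-ins for the kernel's (GNU C typeof), and the struct and cap value are invented for the demo:

/* Shape of the change: read the shared field once into a local, compute,
 * publish once.  Simplified READ_ONCE/WRITE_ONCE stand-ins; demo only.
 */
#include <stddef.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct demo_sock {
        size_t max_recvmsg_len;         /* read and written without a lock */
};

static void record_len(struct demo_sock *s, size_t len, size_t cap)
{
        size_t max_len = READ_ONCE(s->max_recvmsg_len);

        if (max_len < len)
                max_len = len;
        if (max_len > cap)
                max_len = cap;
        WRITE_ONCE(s->max_recvmsg_len, max_len);
}

int main(void)
{
        struct demo_sock s = { .max_recvmsg_len = 0 };

        record_len(&s, 4096, 32000);
        record_len(&s, 65536, 32000);   /* clamped to the cap */
        printf("max_recvmsg_len = %zu\n", READ_ONCE(s.max_recvmsg_len));
        return 0;
}
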
@@ -393,10 +393,12 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
        struct qrtr_node *node;
        unsigned long flags;
 
+       mutex_lock(&qrtr_node_lock);
        spin_lock_irqsave(&qrtr_nodes_lock, flags);
        node = radix_tree_lookup(&qrtr_nodes, nid);
        node = qrtr_node_acquire(node);
        spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
+       mutex_unlock(&qrtr_node_lock);
 
        return node;
 }
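
The two added lock/unlock lines are the whole qrtr fix: the lookup and the refcount acquire in qrtr_node_lookup() are now serialized, via qrtr_node_lock, against the path that drops the last reference and tears the node down, so the lookup can no longer grab a node whose refcount has already reached zero. The pattern, collapsed onto a single lock in a standalone sketch; the names and the flat table are invented, and the real code uses a kref, a radix tree and a second spinlock:

/* Lookup-and-get must be serialized with last-put-and-teardown.
 * Standalone sketch with a single mutex; not the qrtr implementation.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        int refcount;                   /* protected by table_lock here */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table[16];          /* toy stand-in for the radix tree */

static struct node *node_lookup(int id)
{
        struct node *n;

        pthread_mutex_lock(&table_lock);
        n = table[id % 16];
        if (n)
                n->refcount++;          /* taken before teardown can run */
        pthread_mutex_unlock(&table_lock);
        return n;
}

static void node_put(struct node *n)
{
        int last;

        pthread_mutex_lock(&table_lock);
        last = (--n->refcount == 0);
        if (last)
                table[n->id % 16] = NULL;   /* teardown under the same lock */
        pthread_mutex_unlock(&table_lock);
        if (last)
                free(n);
}

int main(void)
{
        struct node *n = calloc(1, sizeof(*n));

        n->id = 3;
        n->refcount = 1;                /* the table's own reference */
        table[3] = n;

        struct node *got = node_lookup(3);
        printf("looked up node %d, refcount now %d\n", got->id, got->refcount);

        node_put(n);    /* drop the table's reference */
        node_put(got);  /* our lookup reference keeps it alive until here */
        return 0;
}
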
@@ -274,7 +274,7 @@ err:
        return NULL;
 }
 
-static int server_del(struct qrtr_node *node, unsigned int port)
+static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
 {
        struct qrtr_lookup *lookup;
        struct qrtr_server *srv;

@@ -287,7 +287,7 @@ static int server_del(struct qrtr_node *node, unsigned int port)
        radix_tree_delete(&node->servers, port);
 
        /* Broadcast the removal of local servers */
-       if (srv->node == qrtr_ns.local_node)
+       if (srv->node == qrtr_ns.local_node && bcast)
                service_announce_del(&qrtr_ns.bcast_sq, srv);
 
        /* Announce the service's disappearance to observers */

@@ -373,7 +373,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
                }
                slot = radix_tree_iter_resume(slot, &iter);
                rcu_read_unlock();
-               server_del(node, srv->port);
+               server_del(node, srv->port, true);
                rcu_read_lock();
        }
        rcu_read_unlock();

@@ -459,10 +459,13 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
                kfree(lookup);
        }
 
-       /* Remove the server belonging to this port */
+       /* Remove the server belonging to this port but don't broadcast
+        * DEL_SERVER. Neighbours would've already removed the server belonging
+        * to this port due to the DEL_CLIENT broadcast from qrtr_port_remove().
+        */
        node = node_get(node_id);
        if (node)
-               server_del(node, port);
+               server_del(node, port, false);
 
        /* Advertise the removal of this client to all local servers */
        local_node = node_get(qrtr_ns.local_node);

@@ -567,7 +570,7 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
        if (!node)
                return -ENOENT;
 
-       return server_del(node, port);
+       return server_del(node, port, true);
 }
 
 static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
@@ -1830,6 +1830,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
                err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
                if (err)
                        goto err;
+               if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
+                       err = -EINVAL;
+                       goto err;
+               }
        }
 
        if (sctp_state(asoc, CLOSED)) {
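
The four added sctp lines re-validate sinfo_stream after sctp_wait_for_sndbuf() returns: the wait can sleep, and the association's outgoing stream count may have shrunk in the meantime, so the bounds check done before blocking is not sufficient on its own. The general shape, reduced to a standalone sketch; the types and the fake wait are invented, and the real wait can of course also fail:

/* Re-check state after a blocking wait: anything validated before the
 * wait may have changed while we slept.  Standalone sketch, not sctp.
 */
#include <errno.h>
#include <stdio.h>

struct assoc {
        unsigned int stream_outcnt;     /* may shrink while we are blocked */
        unsigned int sndbuf_free;
};

/* Stand-in for a sleeping wait; here it also shrinks the stream count,
 * the way a concurrent stream reconfiguration could. */
static int wait_for_sndbuf(struct assoc *a)
{
        a->sndbuf_free = 4096;
        a->stream_outcnt = 1;
        return 0;
}

static int send_on_stream(struct assoc *a, unsigned int stream, unsigned int len)
{
        if (stream >= a->stream_outcnt)
                return -EINVAL;                 /* pre-wait check */

        if (len > a->sndbuf_free) {
                int err = wait_for_sndbuf(a);

                if (err)
                        return err;
                if (stream >= a->stream_outcnt)
                        return -EINVAL;         /* the re-check added by the fix */
        }
        return 0;
}

int main(void)
{
        struct assoc a = { .stream_outcnt = 4, .sndbuf_free = 0 };

        printf("send on stream 2 -> %d (expect -EINVAL)\n",
               send_on_stream(&a, 2, 100));
        return 0;
}
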
@@ -94,6 +94,11 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
                                         info->op,
                                         info->flags);
 
+       if (info->vsk && !skb_set_owner_sk_safe(skb, sk_vsock(info->vsk))) {
+               WARN_ONCE(1, "failed to allocate skb on vsock socket with sk_refcnt == 0\n");
+               goto out;
+       }
+
        return skb;
 
 out:

@@ -1303,6 +1308,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
                goto free_pkt;
        }
 
+       if (!skb_set_owner_sk_safe(skb, sk)) {
+               WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
+               goto free_pkt;
+       }
+
        vsk = vsock_sk(sk);
 
        lock_sock(sk);
@@ -1842,7 +1842,13 @@ static ssize_t vmci_transport_stream_enqueue(
                                                struct msghdr *msg,
                                                size_t len)
 {
-       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
+       ssize_t err;
+
+       err = vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
+       if (err < 0)
+               err = -ENOMEM;
+
+       return err;
 }
 
 static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
@@ -60,6 +60,7 @@ ip link set dev $VETH up
 ip -n $NETNS link set dev $VETH up
 chk_rps "changing rps_default_mask affect newly created devices" "" $VETH 3
 chk_rps "changing rps_default_mask don't affect newly child netns[II]" $NETNS $VETH 0
+ip link del dev $VETH
 ip netns del $NETNS
 
 setup