Merge tag 'net-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from can, wifi, netfilter, bluetooth and ebpf.

  Current release - regressions:

   - bpf: sockmap: avoid potential NULL dereference in
     sk_psock_verdict_data_ready()

   - wifi: iwlwifi: fix -Warray-bounds bug in iwl_mvm_wait_d3_notif()

   - phylink: actually fix ksettings_set() ethtool call

   - eth: dwmac-qcom-ethqos: fix a regression on EMAC < 3

  Current release - new code bugs:

   - wifi: mt76: fix possible NULL pointer dereference in
     mt7996_mac_write_txwi()

  Previous releases - regressions:

   - netfilter: fix NULL pointer dereference in nf_confirm_cthelper

   - wifi: rtw88/rtw89: correct PS calculation for SUPPORTS_DYNAMIC_PS

   - openvswitch: fix upcall counter access before allocation

   - bluetooth:
      - fix use-after-free in hci_remove_ltk/hci_remove_irk
      - fix l2cap_disconnect_req deadlock

   - nic: bnxt_en: prevent kernel panic when receiving unexpected
     PHC_UPDATE event

  Previous releases - always broken:

   - core: annotate rfs lockless accesses

   - sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values

   - netfilter: add null check for nla_nest_start_noflag() in
     nft_dump_basechain_hook()

   - bpf: fix UAF in task local storage

   - ipv4: ping_group_range: allow GID from 2147483648 to 4294967294

   - ipv6: rpl: fix route of death.

   - tcp: gso: really support BIG TCP

   - mptcp: fixes for user-space PM address advertisement

   - smc: avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT

   - can: avoid possible use-after-free when j1939_can_rx_register fails

   - batman-adv: fix UaF while rescheduling delayed work

   - eth: qede: fix scheduling while atomic

   - eth: ice: make writes to /dev/gnssX synchronous"

* tag 'net-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (83 commits)
  bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks
  bnxt_en: Prevent kernel panic when receiving unexpected PHC_UPDATE event
  bnxt_en: Skip firmware fatal error recovery if chip is not accessible
  bnxt_en: Query default VLAN before VNIC setup on a VF
  bnxt_en: Don't issue AP reset during ethtool's reset operation
  bnxt_en: Fix bnxt_hwrm_update_rss_hash_cfg()
  net: bcmgenet: Fix EEE implementation
  eth: ixgbe: fix the wake condition
  eth: bnxt: fix the wake condition
  lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release()
  bpf: Add extra path pointer check to d_path helper
  net: sched: fix possible refcount leak in tc_chain_tmplt_add()
  net: sched: act_police: fix sparse errors in tcf_police_dump()
  net: openvswitch: fix upcall counter access before allocation
  net: sched: move rtm_tca_policy declaration to include file
  ice: make writes to /dev/gnssX synchronous
  net: sched: add rcu annotations around qdisc->qdisc_sleeping
  rfs: annotate lockless accesses to RFS sock flow table
  rfs: annotate lockless accesses to sk->sk_rxhash
  virtio_net: use control_buf for coalesce params
  ...
commit 25041a4c02
@@ -223,7 +223,7 @@ attribute-sets:
         name: tx-min-frag-size
         type: u32
       -
-        name: tx-min-frag-size
+        name: rx-min-frag-size
         type: u32
       -
         name: verify-enabled
@@ -294,7 +294,7 @@ attribute-sets:
         name: master-slave-state
         type: u8
       -
-        name: master-slave-lanes
+        name: lanes
         type: u32
       -
         name: rate-matching
@@ -322,7 +322,7 @@ attribute-sets:
         name: ext-substate
         type: u8
       -
-        name: down-cnt
+        name: ext-down-cnt
         type: u32
       -
         name: debug
@@ -577,7 +577,7 @@ attribute-sets:
         name: phc-index
         type: u32
   -
-    name: cable-test-nft-nest-result
+    name: cable-test-ntf-nest-result
     attributes:
       -
         name: pair
@@ -586,7 +586,7 @@ attribute-sets:
         name: code
         type: u8
   -
-    name: cable-test-nft-nest-fault-length
+    name: cable-test-ntf-nest-fault-length
     attributes:
       -
        name: pair
@@ -595,16 +595,16 @@ attribute-sets:
         name: cm
         type: u32
   -
-    name: cable-test-nft-nest
+    name: cable-test-ntf-nest
     attributes:
       -
         name: result
         type: nest
-        nested-attributes: cable-test-nft-nest-result
+        nested-attributes: cable-test-ntf-nest-result
       -
         name: fault-length
         type: nest
-        nested-attributes: cable-test-nft-nest-fault-length
+        nested-attributes: cable-test-ntf-nest-fault-length
   -
     name: cable-test
     attributes:
@@ -618,7 +618,7 @@ attribute-sets:
       -
         name: nest
         type: nest
-        nested-attributes: cable-test-nft-nest
+        nested-attributes: cable-test-ntf-nest
   -
     name: cable-test-tdr-cfg
     attributes:
@@ -776,7 +776,7 @@ attribute-sets:
         name: hist-bkt-hi
         type: u32
       -
-        name: hist-bkt-val
+        name: hist-val
         type: u64
   -
     name: stats
@@ -965,7 +965,7 @@ operations:
            - duplex
            - master-slave-cfg
            - master-slave-state
-            - master-slave-lanes
+            - lanes
            - rate-matching
      dump: *linkmodes-get-op
  -
@@ -999,7 +999,7 @@ operations:
            - sqi-max
            - ext-state
            - ext-substate
-            - down-cnt
+            - ext-down-cnt
      dump: *linkstate-get-op
  -
    name: debug-get
@@ -1351,7 +1351,7 @@ operations:
      reply:
        attributes:
          - header
-          - cable-test-nft-nest
+          - cable-test-ntf-nest
  -
    name: cable-test-tdr-act
    doc: Cable test TDR.
@@ -1539,7 +1539,7 @@ operations:
            - hkey
      dump: *rss-get-op
  -
-    name: plca-get
+    name: plca-get-cfg
    doc: Get PLCA params.

    attribute-set: plca
@@ -1561,7 +1561,7 @@ operations:
            - burst-tmr
      dump: *plca-get-op
  -
-    name: plca-set
+    name: plca-set-cfg
    doc: Set PLCA params.

    attribute-set: plca
@@ -1585,7 +1585,7 @@
  -
    name: plca-ntf
    doc: Notification for change in PLCA params.
-    notify: plca-get
+    notify: plca-get-cfg
  -
    name: mm-get
    doc: Get MAC Merge configuration and state
@@ -1352,8 +1352,8 @@ ping_group_range - 2 INTEGERS
	Restrict ICMP_PROTO datagram sockets to users in the group range.
	The default is "1 0", meaning, that nobody (not even root) may
	create ping sockets. Setting it to "100 100" would grant permissions
-	to the single group. "0 4294967295" would enable it for the world, "100
-	4294967295" would enable it for the users, but not daemons.
+	to the single group. "0 4294967294" would enable it for the world, "100
+	4294967294" would enable it for the users, but not daemons.

 tcp_early_demux - BOOLEAN
	Enable early demux for established TCP sockets.
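[Editor's note] The hunk above, together with the GID_T_MAX change in the net/ping.h hunk further down, raises the usable upper bound of ping_group_range to 4294967294. As a hedged illustration (not part of the patch; path and socket constants are the standard Linux ones), a process whose GID falls inside the configured range can open an unprivileged ICMP datagram socket:

/* Illustrative userspace sketch; assumes root has first run e.g.
 *   echo "100 4294967294" > /proc/sys/net/ipv4/ping_group_range
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	/* unprivileged "ping" socket; permitted only when the caller's
	 * group is inside ping_group_range */
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

	if (fd < 0) {
		fprintf(stderr, "ping socket: %s\n", strerror(errno));
		return 1;
	}
	puts("unprivileged ICMP socket created");
	return 0;
}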
@@ -78,7 +78,8 @@ enum qca_flags {
	QCA_HW_ERROR_EVENT,
	QCA_SSR_TRIGGERED,
	QCA_BT_OFF,
-	QCA_ROM_FW
+	QCA_ROM_FW,
+	QCA_DEBUGFS_CREATED,
 };

 enum qca_capabilities {
@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev)
	if (!hdev->debugfs)
		return;

+	if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
+		return;
+
	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
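[Editor's note] The qca_debugfs_init() hunk above is a create-once guard: test_and_set_bit() atomically sets QCA_DEBUGFS_CREATED and returns its previous value, so only the first caller creates the debugfs entries. A hedged userspace analogue of the idiom, with C11 atomics standing in for the kernel's test_and_set_bit() (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag created = ATOMIC_FLAG_INIT;

/* Run create() exactly once, even with racing callers. */
static bool create_once(void (*create)(void))
{
	/* atomic_flag_test_and_set() returns true if the flag was
	 * already set, mirroring test_and_set_bit() */
	if (atomic_flag_test_and_set(&created))
		return false;	/* already created by an earlier caller */
	create();
	return true;
}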
@@ -1188,8 +1188,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
	struct lan9303 *chip = ds->priv;

	dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
-	if (vid)
-		return -EOPNOTSUPP;

	return lan9303_alr_add_port(chip, addr, port, false);
 }
@@ -1201,8 +1199,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
	struct lan9303 *chip = ds->priv;

	dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
-	if (vid)
-		return -EOPNOTSUPP;
	lan9303_alr_del_port(chip, addr, port);

	return 0;
@@ -20,6 +20,7 @@ config NET_DSA_QCA8K_LEDS_SUPPORT
	bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support"
	depends on NET_DSA_QCA8K
	depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K
+	depends on LEDS_TRIGGERS
	help
	  This enabled support for LEDs present on the Qualcomm Atheros
	  QCA8K Ethernet switch chips.
@@ -68,9 +68,15 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)

 bool pdsc_is_fw_good(struct pdsc *pdsc)
 {
-	u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+	bool fw_running = pdsc_is_fw_running(pdsc);
+	u8 gen;

-	return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation;
+	/* Make sure to update the cached fw_status by calling
+	 * pdsc_is_fw_running() before getting the generation
+	 */
+	gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+	return fw_running && gen == pdsc->fw_generation;
 }

 static u8 pdsc_devcmd_status(struct pdsc *pdsc)
@@ -2531,9 +2531,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
-		priv->wol_irq = platform_get_irq(pdev, 2);
+		priv->wol_irq = platform_get_irq_optional(pdev, 2);
	} else {
-		priv->wol_irq = platform_get_irq(pdev, 1);
+		priv->wol_irq = platform_get_irq_optional(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		ret = -EINVAL;
@@ -692,7 +692,7 @@ next_tx_int:

	__netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
-				   READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING);
+				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
 }

 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -2365,6 +2365,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
		u64 ns;

+		if (!ptp)
+			goto async_event_process_exit;
+
		spin_lock_bh(&ptp->ptp_lock);
		bnxt_ptp_update_current_time(bp);
		ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
@@ -4763,6 +4766,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
			continue;
+		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
+		    !bp->ptp_cfg)
+			continue;
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
	}
	if (bmap && bmap_size) {
@@ -5350,6 +5356,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
		return;

+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	/* all contexts configured to same hash_type, zero always exists */
	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	resp = hwrm_req_hold(bp, req);
@@ -8812,6 +8819,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
			goto err_out;
	}

+	if (BNXT_VF(bp))
+		bnxt_hwrm_func_qcfg(bp);
+
	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;
@@ -11598,6 +11608,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
 static void bnxt_fw_health_check(struct bnxt *bp)
 {
	struct bnxt_fw_health *fw_health = bp->fw_health;
+	struct pci_dev *pdev = bp->pdev;
	u32 val;

	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
@@ -11611,7 +11622,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
	}

	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
-	if (val == fw_health->last_fw_heartbeat) {
+	if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
		fw_health->arrests++;
		goto fw_reset;
	}
@@ -11619,7 +11630,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
	fw_health->last_fw_heartbeat = val;

	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
-	if (val != fw_health->last_fw_reset_cnt) {
+	if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
		fw_health->discoveries++;
		goto fw_reset;
	}
@@ -13025,26 +13036,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)

 #endif /* CONFIG_RFS_ACCEL */

-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+				    unsigned int entry, struct udp_tunnel_info *ti)
 {
	struct bnxt *bp = netdev_priv(netdev);
-	struct udp_tunnel_info ti;
	unsigned int cmd;

-	udp_tunnel_nic_get_port(netdev, table, 0, &ti);
-	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;

-	if (ti.port)
-		return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
+	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
+}
+
+static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+				      unsigned int entry, struct udp_tunnel_info *ti)
+{
+	struct bnxt *bp = netdev_priv(netdev);
+	unsigned int cmd;
+
+	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+	else
+		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;

	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
 }

 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
-	.sync_table	= bnxt_udp_tunnel_sync,
+	.set_port	= bnxt_udp_tunnel_set_port,
+	.unset_port	= bnxt_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
@@ -3831,7 +3831,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
		}
	}

-	if (req & BNXT_FW_RESET_AP) {
+	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_ap(dev)) {
@@ -952,6 +952,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
		bnxt_ptp_timecounter_init(bp, true);
		bnxt_ptp_adjfine_rtc(bp, 0);
	}
+	bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);

	ptp->ptp_info = bnxt_ptp_caps;
	if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
@@ -1272,7 +1272,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
	}
 }

-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+			     bool tx_lpi_enabled)
 {
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1292,7 +1293,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)

	/* Enable EEE and switch to a 27Mhz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
-	if (enable)
+	if (tx_lpi_enabled)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1313,6 +1314,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
+	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
 }

 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -1328,6 +1330,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
+	e->tx_lpi_enabled = p->tx_lpi_enabled;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
@@ -1337,7 +1340,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
 {
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
-	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;
@@ -1348,16 +1350,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
-		bcmgenet_eee_enable_set(dev, false);
+		bcmgenet_eee_enable_set(dev, false, false);
	} else {
-		ret = phy_init_eee(dev->phydev, false);
-		if (ret) {
-			netif_err(priv, hw, dev, "EEE initialization failed\n");
-			return ret;
-		}
-
+		p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
-		bcmgenet_eee_enable_set(dev, true);
+		bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
@@ -4279,9 +4276,6 @@ static int bcmgenet_resume(struct device *d)
	if (!device_may_wakeup(d))
		phy_resume(dev->phydev);

-	if (priv->eee.eee_enabled)
-		bcmgenet_eee_enable_set(dev, true);
-
	bcmgenet_netif_start(dev);

	netif_device_attach(dev);
@@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
 void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode);

+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+			     bool tx_lpi_enabled);
+
 #endif /* __BCMGENET_H__ */
@@ -87,6 +87,11 @@ static void bcmgenet_mac_config(struct net_device *dev)
		reg |= CMD_TX_EN | CMD_RX_EN;
	}
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+	priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+	bcmgenet_eee_enable_set(dev,
+				priv->eee.eee_enabled && priv->eee.eee_active,
+				priv->eee.tx_lpi_enabled);
 }

 /* setup netdev link state when PHY link status change and
@@ -1229,7 +1229,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		if (!skb)
			break;

-		rx_byte_cnt += skb->len;
+		/* When set, the outer VLAN header is extracted and reported
+		 * in the receive buffer descriptor. So rx_byte_cnt should
+		 * add the length of the extracted VLAN header.
+		 */
+		if (bd_status & ENETC_RXBD_FLAG_VLAN)
+			rx_byte_cnt += VLAN_HLEN;
+		rx_byte_cnt += skb->len + ETH_HLEN;
		rx_frm_cnt++;

		napi_gro_receive(napi, skb);
@@ -1565,6 +1571,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
		enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
				     &cleaned_cnt, &xdp_buff);

+		/* When set, the outer VLAN header is extracted and reported
+		 * in the receive buffer descriptor. So rx_byte_cnt should
+		 * add the length of the extracted VLAN header.
+		 */
+		if (bd_status & ENETC_RXBD_FLAG_VLAN)
+			rx_byte_cnt += VLAN_HLEN;
+		rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
+
		xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);

		switch (xdp_act) {
@@ -5160,7 +5160,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
  */
 int
 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
-		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
+		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
 {
	struct ice_aq_desc desc = { 0 };
@@ -229,7 +229,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		struct ice_sq_cd *cd);
 int
 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
-		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
+		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd);
 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
 #endif /* _ICE_COMMON_H_ */
@@ -16,8 +16,8 @@
  * * number of bytes written - success
  * * negative - error code
  */
-static unsigned int
-ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+static int
+ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size)
 {
	struct ice_aqc_link_topo_addr link_topo;
	struct ice_hw *hw = &pf->hw;
@@ -72,39 +72,7 @@ err_out:
	dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
		offset, size, err);

-	return offset;
-}
-
-/**
- * ice_gnss_write_pending - Write all pending data to internal GNSS
- * @work: GNSS write work structure
- */
-static void ice_gnss_write_pending(struct kthread_work *work)
-{
-	struct gnss_serial *gnss = container_of(work, struct gnss_serial,
-						write_work);
-	struct ice_pf *pf = gnss->back;
-
-	if (!pf)
-		return;
-
-	if (!test_bit(ICE_FLAG_GNSS, pf->flags))
-		return;
-
-	if (!list_empty(&gnss->queue)) {
-		struct gnss_write_buf *write_buf = NULL;
-		unsigned int bytes;
-
-		write_buf = list_first_entry(&gnss->queue,
-					     struct gnss_write_buf, queue);
-
-		bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
-		dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
-
-		list_del(&write_buf->queue);
-		kfree(write_buf->buf);
-		kfree(write_buf);
-	}
+	return err;
 }

 /**
@@ -220,8 +188,6 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
	pf->gnss_serial = gnss;

	kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
-	INIT_LIST_HEAD(&gnss->queue);
-	kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
	kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
	if (IS_ERR(kworker)) {
		kfree(gnss);
@@ -281,7 +247,6 @@ static void ice_gnss_close(struct gnss_device *gdev)
	if (!gnss)
		return;

-	kthread_cancel_work_sync(&gnss->write_work);
	kthread_cancel_delayed_work_sync(&gnss->read_work);
 }

@@ -300,10 +265,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
	       size_t count)
 {
	struct ice_pf *pf = gnss_get_drvdata(gdev);
-	struct gnss_write_buf *write_buf;
	struct gnss_serial *gnss;
-	unsigned char *cmd_buf;
-	int err = count;

	/* We cannot write a single byte using our I2C implementation. */
	if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
@@ -319,24 +281,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
	if (!gnss)
		return -ENODEV;

-	cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
-	if (!cmd_buf)
-		return -ENOMEM;
-
-	memcpy(cmd_buf, buf, count);
-	write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
-	if (!write_buf) {
-		kfree(cmd_buf);
-		return -ENOMEM;
-	}
-
-	write_buf->buf = cmd_buf;
-	write_buf->size = count;
-	INIT_LIST_HEAD(&write_buf->queue);
-	list_add_tail(&write_buf->queue, &gnss->queue);
-	kthread_queue_work(gnss->kworker, &gnss->write_work);
-
-	return err;
+	return ice_gnss_do_write(pf, buf, count);
 }

 static const struct gnss_operations ice_gnss_ops = {
@@ -432,7 +377,6 @@ void ice_gnss_exit(struct ice_pf *pf)
	if (pf->gnss_serial) {
		struct gnss_serial *gnss = pf->gnss_serial;

-		kthread_cancel_work_sync(&gnss->write_work);
		kthread_cancel_delayed_work_sync(&gnss->read_work);
		kthread_destroy_worker(gnss->kworker);
		gnss->kworker = NULL;
@@ -22,26 +22,16 @@
  */
 #define ICE_GNSS_UBX_WRITE_BYTES	(ICE_MAX_I2C_WRITE_BYTES + 1)

-struct gnss_write_buf {
-	struct list_head queue;
-	unsigned int size;
-	unsigned char *buf;
-};
-
 /**
  * struct gnss_serial - data used to initialize GNSS TTY port
  * @back: back pointer to PF
  * @kworker: kwork thread for handling periodic work
  * @read_work: read_work function for handling GNSS reads
- * @write_work: write_work function for handling GNSS writes
- * @queue: write buffers queue
  */
 struct gnss_serial {
	struct ice_pf *back;
	struct kthread_worker *kworker;
	struct kthread_delayed_work read_work;
-	struct kthread_work write_work;
-	struct list_head queue;
 };

 #if IS_ENABLED(CONFIG_GNSS)
@@ -1256,7 +1256,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
	if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
					ixgbe_desc_unused(tx_ring),
					TX_WAKE_THRESHOLD,
-					netif_carrier_ok(tx_ring->netdev) &&
+					!netif_carrier_ok(tx_ring->netdev) ||
					test_bit(__IXGBE_DOWN, &adapter->state)))
		++tx_ring->tx_stats.restart_queue;

@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
 {
	u32 i;

-	if (!cdev) {
+	if (!cdev || cdev->recov_in_prog) {
		memset(stats, 0, sizeof(*stats));
		return;
	}
@@ -269,6 +269,10 @@ struct qede_dev {
 #define QEDE_ERR_WARN			3

	struct qede_dump_info		dump_info;
+	struct delayed_work		periodic_task;
+	unsigned long			stats_coal_ticks;
+	u32				stats_coal_usecs;
+	spinlock_t			stats_lock; /* lock for vport stats access */
 };

 enum QEDE_STATE {
@@ -429,6 +429,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
		}
	}

+	spin_lock(&edev->stats_lock);
+
	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (qede_is_irrelevant_stat(edev, i))
			continue;
@@ -438,6 +440,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
		buf++;
	}

+	spin_unlock(&edev->stats_lock);
+
	__qede_unlock(edev);
 }

@@ -829,6 +833,7 @@ out:

	coal->rx_coalesce_usecs = rx_coal;
	coal->tx_coalesce_usecs = tx_coal;
+	coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;

	return rc;
 }
@@ -842,6 +847,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
	int i, rc = 0;
	u16 rxc, txc;

+	if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
+		edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
+		if (edev->stats_coal_usecs) {
+			edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
+			schedule_delayed_work(&edev->periodic_task, 0);
+
+			DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
+				edev->stats_coal_ticks);
+		} else {
+			cancel_delayed_work_sync(&edev->periodic_task);
+		}
+	}
+
	if (!netif_running(dev)) {
		DP_INFO(edev, "Interface is down\n");
		return -EINVAL;
@@ -2252,7 +2270,8 @@ out:
 }

 static const struct ethtool_ops qede_ethtool_ops = {
-	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
+	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
+					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_link_ksettings		= qede_get_link_ksettings,
	.set_link_ksettings		= qede_set_link_ksettings,
	.get_drvinfo			= qede_get_drvinfo,
@@ -2303,7 +2322,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
 };

 static const struct ethtool_ops qede_vf_ethtool_ops = {
-	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
+	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
+					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_link_ksettings		= qede_get_link_ksettings,
	.get_drvinfo			= qede_get_drvinfo,
	.get_msglevel			= qede_get_msglevel,
@@ -307,6 +307,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)

	edev->ops->get_vport_stats(edev->cdev, &stats);

+	spin_lock(&edev->stats_lock);
+
	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
@@ -404,6 +406,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
		p_ah->tx_1519_to_max_byte_packets =
			stats.ah.tx_1519_to_max_byte_packets;
	}
+
+	spin_unlock(&edev->stats_lock);
 }

 static void qede_get_stats64(struct net_device *dev,
@@ -412,9 +416,10 @@ static void qede_get_stats64(struct net_device *dev,
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

-	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

+	spin_lock(&edev->stats_lock);
+
	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@@ -434,6 +439,8 @@ static void qede_get_stats64(struct net_device *dev,
	stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
+
+	spin_unlock(&edev->stats_lock);
 }

 #ifdef CONFIG_QED_SRIOV
@@ -1063,6 +1070,23 @@ static void qede_unlock(struct qede_dev *edev)
	rtnl_unlock();
 }

+static void qede_periodic_task(struct work_struct *work)
+{
+	struct qede_dev *edev = container_of(work, struct qede_dev,
+					     periodic_task.work);
+
+	qede_fill_by_demand_stats(edev);
+	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+	spin_lock_init(&edev->stats_lock);
+	edev->stats_coal_usecs = USEC_PER_SEC;
+	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
 static void qede_sp_task(struct work_struct *work)
 {
	struct qede_dev *edev = container_of(work, struct qede_dev,
@@ -1082,6 +1106,7 @@ static void qede_sp_task(struct work_struct *work)
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+		cancel_delayed_work_sync(&edev->periodic_task);
 #ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
@@ -1272,6 +1297,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
	 */
	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);
+	qede_init_periodic_task(edev);

	rc = register_netdev(edev->ndev);
	if (rc) {
@@ -1296,6 +1322,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
+
+	/* retain user config (for example - after recovery) */
+	if (edev->stats_coal_usecs)
+		schedule_delayed_work(&edev->periodic_task, 0);
+
	return 0;

 err4:
@@ -1364,6 +1395,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
	unregister_netdev(ndev);

	cancel_delayed_work_sync(&edev->sp_task);
+	cancel_delayed_work_sync(&edev->periodic_task);

	edev->ops->common->set_power_state(cdev, PCI_D0);

@@ -644,7 +644,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
	plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
	plat_dat->dump_debug_regs = rgmii_dump;
	plat_dat->has_gmac4 = 1;
-	plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
+	if (ethqos->has_emac3)
+		plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
	plat_dat->pmt = 1;
	plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
	if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
@@ -2225,11 +2225,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,

	ASSERT_RTNL();

-	/* Mask out unsupported advertisements */
-	linkmode_and(config.advertising, kset->link_modes.advertising,
-		     pl->supported);
-
	if (pl->phydev) {
+		struct ethtool_link_ksettings phy_kset = *kset;
+
+		linkmode_and(phy_kset.link_modes.advertising,
+			     phy_kset.link_modes.advertising,
+			     pl->supported);
+
		/* We can rely on phylib for this update; we also do not need
		 * to update the pl->link_config settings:
		 * - the configuration returned via ksettings_get() will come
@@ -2248,10 +2250,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
		 * the presence of a PHY, this should not be changed as that
		 * should be determined from the media side advertisement.
		 */
-		return phy_ethtool_ksettings_set(pl->phydev, kset);
+		return phy_ethtool_ksettings_set(pl->phydev, &phy_kset);
	}

	config = pl->link_config;
+	/* Mask out unsupported advertisements */
+	linkmode_and(config.advertising, kset->link_modes.advertising,
+		     pl->supported);

	/* FIXME: should we reject autoneg if phy/mac does not support it? */
	switch (kset->base.autoneg) {
@@ -205,6 +205,8 @@ struct control_buf {
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
+	struct virtio_net_ctrl_coal_tx coal_tx;
+	struct virtio_net_ctrl_coal_rx coal_rx;
 };

 struct virtnet_info {
@@ -2934,12 +2936,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
				       struct ethtool_coalesce *ec)
 {
	struct scatterlist sgs_tx, sgs_rx;
-	struct virtio_net_ctrl_coal_tx coal_tx;
-	struct virtio_net_ctrl_coal_rx coal_rx;

-	coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
-	coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
-	sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -2950,9 +2950,9 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
	vi->tx_usecs = ec->tx_coalesce_usecs;
	vi->tx_max_packets = ec->tx_max_coalesced_frames;

-	coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
-	coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
-	sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
@@ -2732,17 +2732,13 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
	if (wowlan_info_ver < 2) {
		struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;

-		notif = kmemdup(notif_v1,
-				offsetofend(struct iwl_wowlan_info_notif,
-					    received_beacons),
-				GFP_ATOMIC);
-
+		notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC);
		if (!notif)
			return false;

		notif->tid_tear_down = notif_v1->tid_tear_down;
		notif->station_id = notif_v1->station_id;
-
+		memset_after(notif, 0, station_id);
	} else {
		notif = (void *)pkt->data;
	}
@@ -914,7 +914,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)

		msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
					poll_list);
+
+		spin_lock_bh(&dev->sta_poll_lock);
		list_del_init(&msta->poll_list);
+		spin_unlock_bh(&dev->sta_poll_lock);

		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;

@@ -1004,10 +1004,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
 {
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
-	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+	struct mt7996_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
@@ -1015,7 +1015,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));

-	if (vif) {
+	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+	if (mvif) {
		omac_idx = mvif->mt76.omac_idx;
		wmm_idx = mvif->mt76.wmm_idx;
		band_idx = mvif->mt76.band_idx;
@@ -1081,12 +1082,16 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
-		u8 idx = mvif->basic_rates_idx;
+		u8 idx = MT7996_BASIC_RATES_TBL;

-		if (mcast && mvif->mcast_rates_idx)
-			idx = mvif->mcast_rates_idx;
-		else if (beacon && mvif->beacon_rates_idx)
-			idx = mvif->beacon_rates_idx;
+		if (mvif) {
+			if (mcast && mvif->mcast_rates_idx)
+				idx = mvif->mcast_rates_idx;
+			else if (beacon && mvif->beacon_rates_idx)
+				idx = mvif->beacon_rates_idx;
+			else
+				idx = mvif->basic_rates_idx;
+		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
@@ -88,15 +88,6 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
		}
	}

-	if (changed & IEEE80211_CONF_CHANGE_PS) {
-		if (hw->conf.flags & IEEE80211_CONF_PS) {
-			rtwdev->ps_enabled = true;
-		} else {
-			rtwdev->ps_enabled = false;
-			rtw_leave_lps(rtwdev);
-		}
-	}
-
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
		rtw_set_channel(rtwdev);

@@ -213,6 +204,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
	config |= PORT_SET_BCN_CTRL;
	rtw_vif_port_config(rtwdev, rtwvif, config);
	rtw_core_port_switch(rtwdev, vif);
+	rtw_recalc_lps(rtwdev, vif);

	mutex_unlock(&rtwdev->mutex);

@@ -244,6 +236,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
	config |= PORT_SET_BCN_CTRL;
	rtw_vif_port_config(rtwdev, rtwvif, config);
	clear_bit(rtwvif->port, rtwdev->hw_port);
+	rtw_recalc_lps(rtwdev, NULL);

	mutex_unlock(&rtwdev->mutex);
 }
@@ -438,6 +431,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
	if (changed & BSS_CHANGED_ERP_SLOT)
		rtw_conf_tx(rtwdev, rtwvif);

+	if (changed & BSS_CHANGED_PS)
+		rtw_recalc_lps(rtwdev, NULL);
+
	rtw_vif_port_config(rtwdev, rtwvif, config);

	mutex_unlock(&rtwdev->mutex);
|
@ -271,8 +271,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
|
|||
* more than two stations associated to the AP, then we can not enter
|
||||
* lps, because fw does not handle the overlapped beacon interval
|
||||
*
|
||||
* mac80211 should iterate vifs and determine if driver can enter
|
||||
* ps by passing IEEE80211_CONF_PS to us, all we need to do is to
|
||||
* rtw_recalc_lps() iterate vifs and determine if driver can enter
|
||||
* ps by vif->type and vif->cfg.ps, all we need to do here is to
|
||||
* get that vif and check if device is having traffic more than the
|
||||
* threshold.
|
||||
*/
|
||||
|
|
|
@@ -299,3 +299,46 @@ void rtw_leave_lps_deep(struct rtw_dev *rtwdev)

	__rtw_leave_lps_deep(rtwdev);
 }
+
+struct rtw_vif_recalc_lps_iter_data {
+	struct rtw_dev *rtwdev;
+	struct ieee80211_vif *found_vif;
+	int count;
+};
+
+static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data,
+				 struct ieee80211_vif *vif)
+{
+	if (data->count < 0)
+		return;
+
+	if (vif->type != NL80211_IFTYPE_STATION) {
+		data->count = -1;
+		return;
+	}
+
+	data->count++;
+	data->found_vif = vif;
+}
+
+static void rtw_vif_recalc_lps_iter(void *data, u8 *mac,
+				    struct ieee80211_vif *vif)
+{
+	__rtw_vif_recalc_lps(data, vif);
+}
+
+void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif)
+{
+	struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev };
+
+	if (new_vif)
+		__rtw_vif_recalc_lps(&data, new_vif);
+	rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data);
+
+	if (data.count == 1 && data.found_vif->cfg.ps) {
+		rtwdev->ps_enabled = true;
+	} else {
+		rtwdev->ps_enabled = false;
+		rtw_leave_lps(rtwdev);
+	}
+}
@@ -23,4 +23,6 @@ void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
 void rtw_leave_lps(struct rtw_dev *rtwdev);
 void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
 enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev);
+void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif);
+
 #endif
@@ -2531,9 +2531,6 @@ static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv
	    rtwvif->tdls_peer)
		return;

-	if (rtwdev->total_sta_assoc > 1)
-		return;
-
	if (rtwvif->offchan)
		return;

@@ -89,15 +89,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
	    !(hw->conf.flags & IEEE80211_CONF_IDLE))
		rtw89_leave_ips(rtwdev);

-	if (changed & IEEE80211_CONF_CHANGE_PS) {
-		if (hw->conf.flags & IEEE80211_CONF_PS) {
-			rtwdev->lps_enabled = true;
-		} else {
-			rtw89_leave_lps(rtwdev);
-			rtwdev->lps_enabled = false;
-		}
-	}
-
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
					    &hw->conf.chandef);
@@ -168,6 +159,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
	rtw89_core_txq_init(rtwdev, vif->txq);

	rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START);
+
+	rtw89_recalc_lps(rtwdev);
 out:
	mutex_unlock(&rtwdev->mutex);

@@ -192,6 +185,7 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
	rtw89_mac_remove_vif(rtwdev, rtwvif);
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	list_del_init(&rtwvif->list);
+	rtw89_recalc_lps(rtwdev);
	rtw89_enter_ips_by_hwflags(rtwdev);

	mutex_unlock(&rtwdev->mutex);
@@ -451,6 +445,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
	if (changed & BSS_CHANGED_CQM)
		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);

+	if (changed & BSS_CHANGED_PS)
+		rtw89_recalc_lps(rtwdev);
+
	mutex_unlock(&rtwdev->mutex);
 }

@@ -252,3 +252,29 @@ void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
	rtw89_p2p_disable_all_noa(rtwdev, vif);
	rtw89_p2p_update_noa(rtwdev, vif);
 }
+
+void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
+{
+	struct ieee80211_vif *vif, *found_vif = NULL;
+	struct rtw89_vif *rtwvif;
+	int count = 0;
+
+	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+		vif = rtwvif_to_vif(rtwvif);
+
+		if (vif->type != NL80211_IFTYPE_STATION) {
+			count = 0;
+			break;
+		}
+
+		count++;
+		found_vif = vif;
+	}
+
+	if (count == 1 && found_vif->cfg.ps) {
+		rtwdev->lps_enabled = true;
+	} else {
+		rtw89_leave_lps(rtwdev);
+		rtwdev->lps_enabled = false;
+	}
+}
@@ -15,6 +15,7 @@ void rtw89_enter_ips(struct rtw89_dev *rtwdev);
 void rtw89_leave_ips(struct rtw89_dev *rtwdev);
 void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
 void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw89_recalc_lps(struct rtw89_dev *rtwdev);

 static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev)
 {
@@ -620,7 +620,7 @@ struct netdev_queue {
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
-	struct Qdisc		*qdisc_sleeping;
+	struct Qdisc __rcu	*qdisc_sleeping;
 #ifdef CONFIG_SYSFS
	struct kobject		kobj;
 #endif
@@ -768,8 +768,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

-		if (table->ents[index] != val)
-			table->ents[index] = val;
+		/* The following WRITE_ONCE() is paired with the READ_ONCE()
+		 * here, and another one in get_rps_cpu().
+		 */
+		if (READ_ONCE(table->ents[index]) != val)
+			WRITE_ONCE(table->ents[index], val);
	}
 }

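[Editor's note] The rps_record_sock_flow() hunk above belongs to the "annotate rfs lockless accesses" series: every lockless reader and writer of the flow table entry is wrapped in READ_ONCE()/WRITE_ONCE() so the compiler cannot tear, fuse, or re-load the access. A hedged analogue of the pairing in plain C11 atomics (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t flow_ent;	/* one table entry, read locklessly */

static void record_flow(uint32_t val)
{
	/* relaxed load/store play the role of READ_ONCE()/WRITE_ONCE():
	 * write only when the hint actually changed */
	if (atomic_load_explicit(&flow_ent, memory_order_relaxed) != val)
		atomic_store_explicit(&flow_ent, val, memory_order_relaxed);
}

static uint32_t read_flow(void)
{
	return atomic_load_explicit(&flow_ent, memory_order_relaxed);
}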
@@ -350,6 +350,7 @@ enum {
 enum {
	HCI_SETUP,
	HCI_CONFIG,
+	HCI_DEBUGFS_CREATED,
	HCI_AUTO_OFF,
	HCI_RFKILLED,
	HCI_MGMT,
@@ -515,6 +515,7 @@ struct hci_dev {
	struct work_struct	cmd_sync_work;
	struct list_head	cmd_sync_work_list;
	struct mutex		cmd_sync_work_lock;
+	struct mutex		unregister_lock;
	struct work_struct	cmd_sync_cancel_work;
	struct work_struct	reenable_adv_work;

@@ -1201,7 +1202,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
		if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis)
			continue;

-		if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+		/* Match destination address if set */
+		if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) {
			rcu_read_unlock();
			return c;
		}
|
@ -180,7 +180,7 @@ struct pneigh_entry {
|
|||
netdevice_tracker dev_tracker;
|
||||
u32 flags;
|
||||
u8 protocol;
|
||||
u8 key[];
|
||||
u32 key[];
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -53,7 +53,7 @@ struct netns_sysctl_ipv6 {
|
|||
int seg6_flowlabel;
|
||||
u32 ioam6_id;
|
||||
u64 ioam6_id_wide;
|
||||
bool skip_notify_on_dev_down;
|
||||
u8 skip_notify_on_dev_down;
|
||||
u8 fib_notify_on_flag_change;
|
||||
u8 icmpv6_error_anycast_as_unicast;
|
||||
};
|
||||
|
|
|
@ -16,11 +16,7 @@
|
|||
#define PING_HTABLE_SIZE 64
|
||||
#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1)
|
||||
|
||||
/*
|
||||
* gid_t is either uint or ushort. We want to pass it to
|
||||
* proc_dointvec_minmax(), so it must not be larger than MAX_INT
|
||||
*/
|
||||
#define GID_T_MAX (((gid_t)~0U) >> 1)
|
||||
#define GID_T_MAX (((gid_t)~0U) - 1)
|
||||
|
||||
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
|
||||
struct pingv6_ops {
|
||||
|
|
|
@@ -127,6 +127,8 @@ static inline void qdisc_run(struct Qdisc *q)
	}
 }

+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
@@ -23,9 +23,6 @@ static inline int rpl_init(void)
 static inline void rpl_exit(void) {}
 #endif

-/* Worst decompression memory usage ipv6 address (16) + pad 7 */
-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
-
 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
			 unsigned char cmpre);

@@ -545,7 +545,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)

 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
 {
-	return qdisc->dev_queue->qdisc_sleeping;
+	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
 }

 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
@@ -754,7 +754,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
+
+		if (rcu_access_pointer(txq->qdisc) !=
+		    rcu_access_pointer(txq->qdisc_sleeping))
			return true;
	}
	return false;
@@ -1152,8 +1152,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
	 * OR	an additional socket flag
	 * [1] : sk_state and sk_prot are in the same cache line.
	 */
-	if (sk->sk_state == TCP_ESTABLISHED)
-		sock_rps_record_flow_hash(sk->sk_rxhash);
+	if (sk->sk_state == TCP_ESTABLISHED) {
+		/* This READ_ONCE() is paired with the WRITE_ONCE()
+		 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+		 */
+		sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+	}
	}
 #endif
 }
@@ -1162,15 +1166,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-	if (unlikely(sk->sk_rxhash != skb->hash))
-		sk->sk_rxhash = skb->hash;
+	/* The following WRITE_ONCE() is paired with the READ_ONCE()
+	 * here, and another one in sock_rps_record_flow().
+	 */
+	if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+		WRITE_ONCE(sk->sk_rxhash, skb->hash);
 #endif
 }

 static inline void sock_rps_reset_rxhash(struct sock *sk)
 {
 #ifdef CONFIG_RPS
-	sk->sk_rxhash = 0;
+	/* Paired with READ_ONCE() in sock_rps_record_flow() */
+	WRITE_ONCE(sk->sk_rxhash, 0);
 #endif
 }

@@ -1035,6 +1035,7 @@ enum bpf_attach_type {
	BPF_TRACE_KPROBE_MULTI,
	BPF_LSM_CGROUP,
	BPF_STRUCT_OPS,
+	BPF_NETFILTER,
	__MAX_BPF_ATTACH_TYPE
 };

@@ -69,9 +69,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
	/* Misc members not needed in bpf_map_meta_equal() check. */
	inner_map_meta->ops = inner_map->ops;
	if (inner_map->ops == &array_map_ops) {
+		struct bpf_array *inner_array_meta =
+			container_of(inner_map_meta, struct bpf_array, map);
+		struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
+
+		inner_array_meta->index_mask = inner_array->index_mask;
+		inner_array_meta->elem_size = inner_array->elem_size;
		inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
-		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
-			container_of(inner_map, struct bpf_array, map)->index_mask;
	}

	fdput(f);
@@ -2433,6 +2433,10 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
		default:
			return -EINVAL;
		}
+	case BPF_PROG_TYPE_NETFILTER:
+		if (expected_attach_type == BPF_NETFILTER)
+			return 0;
+		return -EINVAL;
	case BPF_PROG_TYPE_SYSCALL:
	case BPF_PROG_TYPE_EXT:
		if (expected_attach_type)
@@ -4590,7 +4594,12 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)

	switch (prog->type) {
	case BPF_PROG_TYPE_EXT:
		break;
+	case BPF_PROG_TYPE_NETFILTER:
+		if (attr->link_create.attach_type != BPF_NETFILTER) {
+			ret = -EINVAL;
+			goto out;
+		}
+		break;
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
@@ -627,6 +627,7 @@ void free_task(struct task_struct *tsk)
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
+	bpf_task_storage_free(tsk);
	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -979,7 +980,6 @@ void __put_task_struct(struct task_struct *tsk)
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
-	bpf_task_storage_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
@@ -900,13 +900,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {

 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 {
+	struct path copy;
 	long len;
 	char *p;

 	if (!sz)
 		return 0;

-	p = d_path(path, buf, sz);
+	/*
+	 * The path pointer is verified as trusted and safe to use,
+	 * but let's double check it's valid anyway to workaround
+	 * potentially broken verifier.
+	 */
+	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+	if (len < 0)
+		return len;
+
+	p = d_path(&copy, buf, sz);
 	if (IS_ERR(p)) {
 		len = PTR_ERR(p);
 	} else {
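
The d_path fix above is a defensive snapshot: the struct path is copied with copy_from_kernel_nofault() before use, so a bogus pointer faults gracefully in the copy rather than deep inside d_path(). Userspace has no fault-safe copy primitive, but the copy-then-operate shape looks like this (record type and names invented; only the snapshot step carries over):

    #include <stdio.h>
    #include <string.h>

    struct record {
        int  id;
        char name[32];
    };

    /* Operate on a private snapshot, never on the caller's object, so the
     * data cannot change (or vanish) mid-operation. The kernel version
     * additionally makes the copy itself fault-safe via
     * copy_from_kernel_nofault().
     */
    static void use_record(const struct record *src)
    {
        struct record copy;

        memcpy(&copy, src, sizeof(copy));   /* snapshot first */
        printf("id=%d name=%s\n", copy.id, copy.name);
    }

    int main(void)
    {
        struct record r = { .id = 1, .name = "example" };

        use_record(&r);
        return 0;
    }
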
@@ -280,8 +280,8 @@ static void irq_cpu_rmap_release(struct kref *ref)
 	struct irq_glue *glue =
 		container_of(ref, struct irq_glue, notify.kref);

-	cpu_rmap_put(glue->rmap);
 	glue->rmap->obj[glue->index] = NULL;
+	cpu_rmap_put(glue->rmap);
 	kfree(glue);
 }
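
The irq_cpu_rmap_release() fix is purely an ordering change: cpu_rmap_put() may drop the last reference and free glue->rmap, so clearing glue->rmap->obj[...] must happen before the put, not after. The general rule, in a standalone sketch with a hand-rolled refcount (types invented):

    #include <stdlib.h>

    struct obj {
        int refs;
        int data;
    };

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)
            free(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        o->refs = 1;
        o->data = 42;

        /* Finish every access, then drop the reference. Swapping these
         * two lines reproduces the use-after-free this commit fixes.
         */
        o->data = 0;
        obj_put(o);
        return 0;
    }
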
@@ -101,7 +101,6 @@ static void batadv_dat_purge(struct work_struct *work);
  */
 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
 	queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
 			   msecs_to_jiffies(10000));
 }

@@ -819,6 +818,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
 	if (!bat_priv->dat.hash)
 		return -ENOMEM;

+	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
 	batadv_dat_start_timer(bat_priv);

 	batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
@@ -947,8 +947,8 @@ static void find_cis(struct hci_conn *conn, void *data)
 {
 	struct iso_list_data *d = data;

-	/* Ignore broadcast */
-	if (!bacmp(&conn->dst, BDADDR_ANY))
+	/* Ignore broadcast or if CIG don't match */
+	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
 		return;

 	d->count++;

@@ -963,12 +963,17 @@ static void cis_cleanup(struct hci_conn *conn)
 	struct hci_dev *hdev = conn->hdev;
 	struct iso_list_data d;

+	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
+		return;
+
 	memset(&d, 0, sizeof(d));
 	d.cig = conn->iso_qos.ucast.cig;

 	/* Check if ISO connection is a CIS and remove CIG if there are
 	 * no other connections using it.
 	 */
+	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
+	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
 	if (d.count)
 		return;

@@ -1766,24 +1771,23 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)

 	memset(&data, 0, sizeof(data));

-	/* Allocate a CIG if not set */
+	/* Allocate first still reconfigurable CIG if not set */
 	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
-		for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
+		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
 			data.count = 0;
-			data.cis = 0xff;

-			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
-						 BT_BOUND, &data);
+			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
+						 BT_CONNECT, &data);
 			if (data.count)
 				continue;

-			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
+			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
 						 BT_CONNECTED, &data);
 			if (!data.count)
 				break;
 		}

-		if (data.cig == 0xff)
+		if (data.cig == 0xf0)
 			return false;

 		/* Update CIG */
@@ -1416,10 +1416,10 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)

 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
 {
-	struct smp_ltk *k;
+	struct smp_ltk *k, *tmp;
 	int removed = 0;

-	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
 			continue;

@@ -1435,9 +1435,9 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)

 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
 {
-	struct smp_irk *k;
+	struct smp_irk *k, *tmp;

-	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
+	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
 			continue;
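
list_for_each_entry_rcu() is for lockless readers and does not tolerate the element under the cursor being freed; deletion needs the _safe variant, which caches the next pointer before the loop body runs. The same idea with a plain singly linked list (node type invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int key;
        struct node *next;
    };

    /* Delete every node matching key. 'next' is saved before free(), as
     * list_for_each_entry_safe() does, so the cursor never touches a
     * freed node.
     */
    static void delete_key(struct node **head, int key)
    {
        struct node **pp = head;

        while (*pp) {
            struct node *n = *pp;
            struct node *next = n->next;    /* cache before freeing */

            if (n->key == key) {
                *pp = next;
                free(n);
            } else {
                pp = &n->next;
            }
        }
    }

    static struct node *push(struct node *head, int key)
    {
        struct node *n = malloc(sizeof(*n));

        n->key = key;
        n->next = head;
        return n;
    }

    int main(void)
    {
        struct node *head = NULL;

        head = push(head, 1);
        head = push(head, 2);
        head = push(head, 1);
        delete_key(&head, 1);
        for (struct node *n = head; n; n = n->next)
            printf("%d\n", n->key);     /* prints only 2 */
        return 0;
    }
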
@@ -2686,7 +2686,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
 {
 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

+	mutex_lock(&hdev->unregister_lock);
 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
+	mutex_unlock(&hdev->unregister_lock);

 	write_lock(&hci_dev_list_lock);
 	list_del(&hdev->list);
@@ -3804,48 +3804,56 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
 				   struct sk_buff *skb)
 {
 	struct hci_rp_le_set_cig_params *rp = data;
+	struct hci_cp_le_set_cig_params *cp;
 	struct hci_conn *conn;
-	int i = 0;
+	u8 status = rp->status;
+	int i;

 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

+	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
+	if (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id) {
+		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
+		status = HCI_ERROR_UNSPECIFIED;
+	}
+
 	hci_dev_lock(hdev);

-	if (rp->status) {
+	if (status) {
 		while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
 			conn->state = BT_CLOSED;
-			hci_connect_cfm(conn, rp->status);
+			hci_connect_cfm(conn, status);
 			hci_conn_del(conn);
 		}
 		goto unlock;
 	}

-	rcu_read_lock();
-
-	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
-		if (conn->type != ISO_LINK ||
-		    conn->iso_qos.ucast.cig != rp->cig_id ||
-		    conn->state == BT_CONNECTED)
+	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
+	 *
+	 * If the Status return parameter is zero, then the Controller shall
+	 * set the Connection_Handle arrayed return parameter to the connection
+	 * handle(s) corresponding to the CIS configurations specified in
+	 * the CIS_IDs command parameter, in the same order.
+	 */
+	for (i = 0; i < rp->num_handles; ++i) {
+		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
+						cp->cis[i].cis_id);
+		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
 			continue;

-		conn->handle = __le16_to_cpu(rp->handle[i++]);
+		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
+			continue;
+
+		conn->handle = __le16_to_cpu(rp->handle[i]);

 		bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn,
 			   conn->handle, conn->parent);

 		/* Create CIS if LE is already connected */
-		if (conn->parent && conn->parent->state == BT_CONNECTED) {
-			rcu_read_unlock();
+		if (conn->parent && conn->parent->state == BT_CONNECTED)
 			hci_le_create_cis(conn);
-			rcu_read_lock();
-		}
-
-		if (i == rp->num_handles)
-			break;
 	}

-	rcu_read_unlock();
-
 unlock:
 	hci_dev_unlock(hdev);
@@ -629,6 +629,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
 	mutex_init(&hdev->cmd_sync_work_lock);
+	mutex_init(&hdev->unregister_lock);

 	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
 	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);

@@ -692,14 +693,19 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
 			void *data, hci_cmd_sync_work_destroy_t destroy)
 {
 	struct hci_cmd_sync_work_entry *entry;
+	int err = 0;

-	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
-		return -ENODEV;
+	mutex_lock(&hdev->unregister_lock);
+	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+		err = -ENODEV;
+		goto unlock;
+	}

 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
+	if (!entry) {
+		err = -ENOMEM;
+		goto unlock;
+	}
 	entry->func = func;
 	entry->data = data;
 	entry->destroy = destroy;

@@ -710,7 +716,9 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,

 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

-	return 0;
+unlock:
+	mutex_unlock(&hdev->unregister_lock);
+	return err;
 }
 EXPORT_SYMBOL(hci_cmd_sync_submit);

@@ -4543,6 +4551,9 @@ static int hci_init_sync(struct hci_dev *hdev)
 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
 		return 0;

+	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
+		return 0;
+
 	hci_debugfs_create_common(hdev);

 	if (lmp_bredr_capable(hdev))
@@ -4306,6 +4306,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
 	result = __le16_to_cpu(rsp->result);
 	status = __le16_to_cpu(rsp->status);

+	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
+					   dcid > L2CAP_CID_DYN_END))
+		return -EPROTO;
+
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
 	       dcid, scid, result, status);

@@ -4337,6 +4341,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,

 	switch (result) {
 	case L2CAP_CR_SUCCESS:
+		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
+			err = -EBADSLT;
+			break;
+		}
+
 		l2cap_state_change(chan, BT_CONFIG);
 		chan->ident = 0;
 		chan->dcid = dcid;

@@ -4663,7 +4672,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,

 	chan->ops->set_shutdown(chan);

+	l2cap_chan_unlock(chan);
 	mutex_lock(&conn->chan_lock);
+	l2cap_chan_lock(chan);
 	l2cap_chan_del(chan, ECONNRESET);
 	mutex_unlock(&conn->chan_lock);

@@ -4702,7 +4713,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
 		return 0;
 	}

+	l2cap_chan_unlock(chan);
 	mutex_lock(&conn->chan_lock);
+	l2cap_chan_lock(chan);
 	l2cap_chan_del(chan, 0);
 	mutex_unlock(&conn->chan_lock);
@@ -126,7 +126,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
 #define J1939_CAN_ID CAN_EFF_FLAG
 #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)

-static DEFINE_SPINLOCK(j1939_netdev_lock);
+static DEFINE_MUTEX(j1939_netdev_lock);

 static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
 {

@@ -220,7 +220,7 @@ static void __j1939_rx_release(struct kref *kref)
 	j1939_can_rx_unregister(priv);
 	j1939_ecu_unmap_all(priv);
 	j1939_priv_set(priv->ndev, NULL);
-	spin_unlock(&j1939_netdev_lock);
+	mutex_unlock(&j1939_netdev_lock);
 }

 /* get pointer to priv without increasing ref counter */

@@ -248,9 +248,9 @@ static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
 {
 	struct j1939_priv *priv;

-	spin_lock(&j1939_netdev_lock);
+	mutex_lock(&j1939_netdev_lock);
 	priv = j1939_priv_get_by_ndev_locked(ndev);
-	spin_unlock(&j1939_netdev_lock);
+	mutex_unlock(&j1939_netdev_lock);

 	return priv;
 }

@@ -260,14 +260,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
 	struct j1939_priv *priv, *priv_new;
 	int ret;

-	spin_lock(&j1939_netdev_lock);
+	mutex_lock(&j1939_netdev_lock);
 	priv = j1939_priv_get_by_ndev_locked(ndev);
 	if (priv) {
 		kref_get(&priv->rx_kref);
-		spin_unlock(&j1939_netdev_lock);
+		mutex_unlock(&j1939_netdev_lock);
 		return priv;
 	}
-	spin_unlock(&j1939_netdev_lock);
+	mutex_unlock(&j1939_netdev_lock);

 	priv = j1939_priv_create(ndev);
 	if (!priv)

@@ -277,29 +277,31 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
 	spin_lock_init(&priv->j1939_socks_lock);
 	INIT_LIST_HEAD(&priv->j1939_socks);

-	spin_lock(&j1939_netdev_lock);
+	mutex_lock(&j1939_netdev_lock);
 	priv_new = j1939_priv_get_by_ndev_locked(ndev);
 	if (priv_new) {
 		/* Someone was faster than us, use their priv and roll
 		 * back our's.
 		 */
 		kref_get(&priv_new->rx_kref);
-		spin_unlock(&j1939_netdev_lock);
+		mutex_unlock(&j1939_netdev_lock);
 		dev_put(ndev);
 		kfree(priv);
 		return priv_new;
 	}
 	j1939_priv_set(ndev, priv);
-	spin_unlock(&j1939_netdev_lock);

 	ret = j1939_can_rx_register(priv);
 	if (ret < 0)
 		goto out_priv_put;

+	mutex_unlock(&j1939_netdev_lock);
 	return priv;

 out_priv_put:
+	j1939_priv_set(ndev, NULL);
+	mutex_unlock(&j1939_netdev_lock);
+
 	dev_put(ndev);
 	kfree(priv);

@@ -308,7 +310,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)

 void j1939_netdev_stop(struct j1939_priv *priv)
 {
-	kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+	kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
 	j1939_priv_put(priv);
 }
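
The j1939 conversion above swaps a spinlock for a mutex because j1939_can_rx_register() can sleep, and the release path becomes kref_put_mutex(): the lock is taken only when the count may really drop to zero, so the 1 -> 0 transition and the unpublish from the lookup table happen atomically with respect to lookups. A portable sketch of that pattern with C11 atomics and pthreads (all names invented):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    struct priv {
        atomic_int refs;
    };

    static struct priv *table_entry;    /* protected by table_lock */

    /* Lookup takes a reference under the lock, like j1939_priv_get_by_ndev(). */
    static struct priv *priv_get(void)
    {
        struct priv *p;

        pthread_mutex_lock(&table_lock);
        p = table_entry;
        if (p)
            atomic_fetch_add(&p->refs, 1);
        pthread_mutex_unlock(&table_lock);
        return p;
    }

    /* Like kref_put_mutex(): plain decrement on the fast path; when this
     * may be the last reference, take the lock first so no lookup can
     * grab the object between the final decrement and the unpublish.
     */
    static void priv_put(struct priv *p)
    {
        int old = atomic_load(&p->refs);

        while (old > 1)
            if (atomic_compare_exchange_weak(&p->refs, &old, old - 1))
                return;

        pthread_mutex_lock(&table_lock);
        if (atomic_fetch_sub(&p->refs, 1) == 1) {
            table_entry = NULL;         /* release path runs locked */
            pthread_mutex_unlock(&table_lock);
            free(p);
            return;
        }
        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        table_entry = malloc(sizeof(*table_entry));
        atomic_init(&table_entry->refs, 1);

        struct priv *p = priv_get();
        priv_put(p);            /* drops the lookup reference */
        priv_put(table_entry);  /* last put: unpublishes and frees */
        puts("done");
        return 0;
    }
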
@@ -1088,6 +1088,11 @@ void j1939_sk_errqueue(struct j1939_session *session,

 void j1939_sk_send_loop_abort(struct sock *sk, int err)
 {
+	struct j1939_sock *jsk = j1939_sk(sk);
+
+	if (jsk->state & J1939_SOCK_ERRQUEUE)
+		return;
+
 	sk->sk_err = err;

 	sk_error_report(sk);
@@ -4471,8 +4471,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		u32 next_cpu;
 		u32 ident;

-		/* First check into global flow table if there is a match */
-		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+		/* First check into global flow table if there is a match.
+		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+		 */
+		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
 		if ((ident ^ hash) & ~rps_cpu_mask)
 			goto try_rps;
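
Each RFS sock flow table entry packs the flow hash into its upper bits and the desired CPU into the low bits (rps_cpu_mask), so `(ident ^ hash) & ~rps_cpu_mask` is a single-expression "does this slot belong to this flow" check. A self-contained sketch of that encoding (table size and mask width are arbitrary choices here):

    #include <stdint.h>
    #include <stdio.h>

    #define TABLE_SIZE 1024                     /* power of two */
    #define CPU_BITS   6                        /* up to 64 CPUs */
    #define CPU_MASK   ((1u << CPU_BITS) - 1)   /* analogue of rps_cpu_mask */

    static uint32_t ents[TABLE_SIZE];

    /* Record "flow 'hash' last ran on 'cpu'": upper bits keep the hash,
     * low bits keep the CPU.
     */
    static void record_flow(uint32_t hash, unsigned int cpu)
    {
        ents[hash & (TABLE_SIZE - 1)] = (hash & ~CPU_MASK) | cpu;
    }

    /* Return the recorded CPU, or -1 if the slot holds another flow. */
    static int lookup_flow(uint32_t hash)
    {
        uint32_t ident = ents[hash & (TABLE_SIZE - 1)];

        if ((ident ^ hash) & ~CPU_MASK)         /* hash bits disagree */
            return -1;
        return ident & CPU_MASK;
    }

    int main(void)
    {
        record_flow(0x12345678, 3);
        printf("cpu=%d\n", lookup_flow(0x12345678));    /* 3 */
        printf("cpu=%d\n", lookup_flow(0xdeadbeef));    /* -1, miss */
        return 0;
    }

Because writers and readers touch the same u32 slots concurrently, the kernel wraps exactly these loads and stores in READ_ONCE()/WRITE_ONCE(), which is all the hunk above changes.
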
@@ -10541,7 +10543,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 		return NULL;
 	netdev_init_one_queue(dev, queue, NULL);
 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
-	queue->qdisc_sleeping = &noop_qdisc;
+	RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
 	rcu_assign_pointer(dev->ingress_queue, queue);
 #endif
 	return queue;
@@ -1210,7 +1210,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)

 		rcu_read_lock();
 		psock = sk_psock(sk);
-		psock->saved_data_ready(sk);
+		if (psock)
+			psock->saved_data_ready(sk);
 		rcu_read_unlock();
 	}
 }
@@ -34,8 +34,8 @@ static int ip_ttl_min = 1;
 static int ip_ttl_max = 255;
 static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
-static int ip_ping_group_range_min[] = { 0, 0 };
-static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static unsigned long ip_ping_group_range_min[] = { 0, 0 };
+static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static u32 u32_max_div_HZ = UINT_MAX / HZ;
 static int one_day_secs = 24 * 3600;
 static u32 fib_multipath_hash_fields_all_mask __maybe_unused =

@@ -165,7 +165,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
 {
 	struct user_namespace *user_ns = current_user_ns();
 	int ret;
-	gid_t urange[2];
+	unsigned long urange[2];
 	kgid_t low, high;
 	struct ctl_table tmp = {
 		.data = &urange,

@@ -178,7 +178,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
 	inet_get_ping_group_range_table(table, &low, &high);
 	urange[0] = from_kgid_munged(user_ns, low);
 	urange[1] = from_kgid_munged(user_ns, high);
-	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+	ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);

 	if (write && ret == 0) {
 		low = make_kgid(user_ns, urange[0]);
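
This is the "ping_group_range: allow GID from 2147483648 to 4294967294" fix: the range was parsed through a signed-int sysctl handler, so the upper half of the unsigned gid_t space could not be written. The truncation is easy to see in plain C (demo values invented; the exact out-of-range int conversion is implementation-defined, but wraps on typical two's-complement targets):

    #include <stdio.h>

    int main(void)
    {
        unsigned long gid = 3000000000UL;   /* valid gid_t, > INT_MAX */
        int as_int = (int)gid;              /* what an int sysctl stores */

        printf("as unsigned long: %lu\n", gid);
        printf("as int          : %d\n", as_int);   /* negative garbage */
        return 0;
    }

Switching the handler to proc_doulongvec_minmax() with unsigned long bounds keeps the full 0..GID_T_MAX range representable.
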
@@ -60,12 +60,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	struct tcphdr *th;
 	unsigned int thlen;
 	unsigned int seq;
-	__be32 delta;
 	unsigned int oldlen;
 	unsigned int mss;
 	struct sk_buff *gso_skb = skb;
 	__sum16 newcheck;
 	bool ooo_okay, copy_destructor;
+	__wsum delta;

 	th = tcp_hdr(skb);
 	thlen = th->doff * 4;

@@ -75,7 +75,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	if (!pskb_may_pull(skb, thlen))
 		goto out;

-	oldlen = (u16)~skb->len;
+	oldlen = ~skb->len;
 	__skb_pull(skb, thlen);

 	mss = skb_shinfo(skb)->gso_size;

@@ -110,7 +110,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	if (skb_is_gso(segs))
 		mss *= skb_shinfo(segs)->gso_segs;

-	delta = htonl(oldlen + (thlen + mss));
+	delta = (__force __wsum)htonl(oldlen + thlen + mss);

 	skb = segs;
 	th = tcp_hdr(skb);

@@ -119,8 +119,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
 		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

-	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
-					       (__force u32)delta));
+	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

 	while (skb->next) {
 		th->fin = th->psh = 0;

@@ -165,11 +164,11 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 		WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
 	}

-	delta = htonl(oldlen + (skb_tail_pointer(skb) -
-				skb_transport_header(skb)) +
-		      skb->data_len);
-	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
-					       (__force u32)delta));
+	delta = (__force __wsum)htonl(oldlen +
+				      (skb_tail_pointer(skb) -
+				       skb_transport_header(skb)) +
+				      skb->data_len);
+	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		gso_reset_checksum(skb, ~th->check);
 	else
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (skb_cloned(skb)) {
|
||||
if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
|
||||
GFP_ATOMIC)) {
|
||||
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
|
||||
IPSTATS_MIB_OUTDISCARDS);
|
||||
kfree_skb(skb);
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
|
||||
if (unlikely(err)) {
|
||||
kfree_skb(skb);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
|
||||
|
||||
if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
|
||||
hdr->cmpre))) {
|
||||
kfree_skb(skb);
|
||||
|
@ -630,6 +612,17 @@ looped_back:
|
|||
skb_pull(skb, ((hdr->hdrlen + 1) << 3));
|
||||
skb_postpull_rcsum(skb, oldhdr,
|
||||
sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
|
||||
if (unlikely(!hdr->segments_left)) {
|
||||
if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
|
||||
GFP_ATOMIC)) {
|
||||
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
|
||||
kfree_skb(skb);
|
||||
kfree(buf);
|
||||
return -1;
|
||||
}
|
||||
|
||||
oldhdr = ipv6_hdr(skb);
|
||||
}
|
||||
skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
|
||||
skb_reset_network_header(skb);
|
||||
skb_mac_header_rebuild(skb);
|
||||
|
|
|
@@ -6412,9 +6412,9 @@ static struct ctl_table ipv6_route_table_template[] = {
 	{
 		.procname	= "skip_notify_on_dev_down",
 		.data		= &init_net.ipv6.sysctl.skip_notify_on_dev_down,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(u8),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_dou8vec_minmax,
 		.extra1		= SYSCTL_ZERO,
 		.extra2		= SYSCTL_ONE,
 	},
@@ -3,7 +3,7 @@
  * HE handling
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 - 2022 Intel Corporation
+ * Copyright(c) 2019 - 2023 Intel Corporation
  */

 #include "ieee80211_i.h"

@@ -114,6 +114,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
 				  struct link_sta_info *link_sta)
 {
 	struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
+	const struct ieee80211_sta_he_cap *own_he_cap_ptr;
 	struct ieee80211_sta_he_cap own_he_cap;
 	struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
 	u8 he_ppe_size;

@@ -123,12 +124,16 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,

 	memset(he_cap, 0, sizeof(*he_cap));

-	if (!he_cap_ie ||
-	    !ieee80211_get_he_iftype_cap(sband,
-					 ieee80211_vif_type_p2p(&sdata->vif)))
+	if (!he_cap_ie)
 		return;

-	own_he_cap = sband->iftype_data->he_cap;
+	own_he_cap_ptr =
+		ieee80211_get_he_iftype_cap(sband,
+					    ieee80211_vif_type_p2p(&sdata->vif));
+	if (!own_he_cap_ptr)
+		return;
+
+	own_he_cap = *own_he_cap_ptr;

 	/* Make sure size is OK */
 	mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
@@ -1217,6 +1217,7 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
 					       const u16 *inner)
 {
 	unsigned int skb_len = skb->len;
+	bool at_extension = false;
 	bool added = false;
 	int i, j;
 	u8 *len, *list_len = NULL;

@@ -1228,7 +1229,6 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
 	for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) {
 		u16 elem = outer[i];
 		bool have_inner = false;
-		bool at_extension = false;

 		/* should at least be sorted in the sense of normal -> ext */
 		WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS);

@@ -1257,8 +1257,14 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
 		}
 		*list_len += 1;
 		skb_put_u8(skb, (u8)elem);
+		added = true;
 	}

+	/* if we added a list but no extension list, make a zero-len one */
+	if (added && (!at_extension || !list_len))
+		skb_put_u8(skb, 0);
+
 	/* if nothing added remove extension element completely */
 	if (!added)
 		skb_trim(skb, skb_len);
 	else
@@ -4965,7 +4965,9 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 	}

 	if (unlikely(rx->sta && rx->sta->sta.mlo) &&
-	    is_unicast_ether_addr(hdr->addr1)) {
+	    is_unicast_ether_addr(hdr->addr1) &&
+	    !ieee80211_is_probe_resp(hdr->frame_control) &&
+	    !ieee80211_is_beacon(hdr->frame_control)) {
 		/* translate to MLD addresses */
 		if (ether_addr_equal(link->conf->addr, hdr->addr1))
 			ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
@@ -5528,7 +5528,7 @@ ieee80211_beacon_get_template_ema_list(struct ieee80211_hw *hw,
 {
 	struct ieee80211_ema_beacons *ema_beacons = NULL;

-	WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, false, link_id, 0,
+	WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, true, link_id, 0,
 				       &ema_beacons));

 	return ema_beacons;
@@ -87,8 +87,15 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
 	unsigned int subflows_max;
 	int ret = 0;

-	if (mptcp_pm_is_userspace(msk))
-		return mptcp_userspace_pm_active(msk);
+	if (mptcp_pm_is_userspace(msk)) {
+		if (mptcp_userspace_pm_active(msk)) {
+			spin_lock_bh(&pm->lock);
+			pm->subflows++;
+			spin_unlock_bh(&pm->lock);
+			return true;
+		}
+		return false;
+	}

 	subflows_max = mptcp_pm_get_subflows_max(msk);

@@ -181,8 +188,16 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
 	struct mptcp_pm_data *pm = &msk->pm;
 	bool update_subflows;

-	update_subflows = (subflow->request_join || subflow->mp_join) &&
-			  mptcp_pm_is_kernel(msk);
+	update_subflows = subflow->request_join || subflow->mp_join;
+	if (mptcp_pm_is_userspace(msk)) {
+		if (update_subflows) {
+			spin_lock_bh(&pm->lock);
+			pm->subflows--;
+			spin_unlock_bh(&pm->lock);
+		}
+		return;
+	}

 	if (!READ_ONCE(pm->work_pending) && !update_subflows)
 		return;
@@ -1558,6 +1558,24 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
 	return ret;
 }

+void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+{
+	struct mptcp_rm_list alist = { .nr = 0 };
+	struct mptcp_pm_addr_entry *entry;
+
+	list_for_each_entry(entry, rm_list, list) {
+		remove_anno_list_by_saddr(msk, &entry->addr);
+		if (alist.nr < MPTCP_RM_IDS_MAX)
+			alist.ids[alist.nr++] = entry->addr.id;
+	}
+
+	if (alist.nr) {
+		spin_lock_bh(&msk->pm.lock);
+		mptcp_pm_remove_addr(msk, &alist);
+		spin_unlock_bh(&msk->pm.lock);
+	}
+}
+
 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
 					struct list_head *rm_list)
 {
@@ -69,6 +69,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
 							MPTCP_PM_MAX_ADDR_ID + 1,
 							1);
 		list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list);
+		msk->pm.local_addr_used++;
 		ret = e->addr.id;
 	} else if (match) {
 		ret = entry->addr.id;

@@ -79,6 +80,31 @@ append_err:
 	return ret;
 }

+/* If the subflow is closed from the other peer (not via a
+ * subflow destroy command then), we want to keep the entry
+ * not to assign the same ID to another address and to be
+ * able to send RM_ADDR after the removal of the subflow.
+ */
+static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
+						struct mptcp_pm_addr_entry *addr)
+{
+	struct mptcp_pm_addr_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) {
+		if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) {
+			/* TODO: a refcount is needed because the entry can
+			 * be used multiple times (e.g. fullmesh mode).
+			 */
+			list_del_rcu(&entry->list);
+			kfree(entry);
+			msk->pm.local_addr_used--;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
 int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
 						   unsigned int id,
 						   u8 *flags, int *ifindex)

@@ -171,6 +197,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
 	spin_lock_bh(&msk->pm.lock);

 	if (mptcp_pm_alloc_anno_list(msk, &addr_val)) {
+		msk->pm.add_addr_signaled++;
 		mptcp_pm_announce_addr(msk, &addr_val.addr, false);
 		mptcp_pm_nl_addr_send_ack(msk);
 	}

@@ -232,7 +259,7 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)

 	list_move(&match->list, &free_list);

-	mptcp_pm_remove_addrs_and_subflows(msk, &free_list);
+	mptcp_pm_remove_addrs(msk, &free_list);

 	release_sock((struct sock *)msk);

@@ -251,6 +278,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
 	struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
 	struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
 	struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
+	struct mptcp_pm_addr_entry local = { 0 };
 	struct mptcp_addr_info addr_r;
 	struct mptcp_addr_info addr_l;
 	struct mptcp_sock *msk;

@@ -302,12 +330,26 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
 		goto create_err;
 	}

+	local.addr = addr_l;
+	err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
+	if (err < 0) {
+		GENL_SET_ERR_MSG(info, "did not match address and id");
+		goto create_err;
+	}
+
 	lock_sock(sk);

 	err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);

 	release_sock(sk);

+	spin_lock_bh(&msk->pm.lock);
+	if (err)
+		mptcp_userspace_pm_delete_local_addr(msk, &local);
+	else
+		msk->pm.subflows++;
+	spin_unlock_bh(&msk->pm.lock);
+
 create_err:
 	sock_put((struct sock *)msk);
 	return err;

@@ -420,7 +462,11 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
 	ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r);
 	if (ssk) {
 		struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+		struct mptcp_pm_addr_entry entry = { .addr = addr_l };

+		spin_lock_bh(&msk->pm.lock);
+		mptcp_userspace_pm_delete_local_addr(msk, &entry);
+		spin_unlock_bh(&msk->pm.lock);
 		mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
 		mptcp_close_ssk(sk, ssk, subflow);
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
@@ -832,6 +832,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
 			   bool echo);
 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
 					struct list_head *rm_list);
@@ -1694,6 +1694,14 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 	bool eexist = flags & IPSET_FLAG_EXIST, retried = false;

 	do {
+		if (retried) {
+			__ip_set_get(set);
+			nfnl_unlock(NFNL_SUBSYS_IPSET);
+			cond_resched();
+			nfnl_lock(NFNL_SUBSYS_IPSET);
+			__ip_set_put(set);
+		}
+
 		ip_set_lock(set);
 		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
 		ip_set_unlock(set);
@@ -2260,6 +2260,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
 		return 0;

 	helper = rcu_dereference(help->helper);
+	if (!helper)
+		return 0;
+
 	if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
 		return 0;
@@ -1600,6 +1600,8 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,

 	if (nft_base_chain_netdev(family, ops->hooknum)) {
 		nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS);
+		if (!nest_devs)
+			goto nla_put_failure;

 		if (!hook_list)
 			hook_list = &basechain->hook_list;

@@ -9005,7 +9007,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
 			continue;
 		}

-		if (WARN_ON_ONCE(data + expr->ops->size > data_boundary))
+		if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary))
 			return -ENOMEM;

 		memcpy(data + size, expr, expr->ops->size);
@@ -323,7 +323,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
 	dreg = priv->dreg;
 	regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE);
 	for (i = 0; i < regcount; i++, dreg++)
-		track->regs[priv->dreg].bitwise = expr;
+		track->regs[dreg].bitwise = expr;

 	return false;
 }
@@ -236,9 +236,6 @@ void ovs_dp_detach_port(struct vport *p)
 	/* First drop references to device. */
 	hlist_del_rcu(&p->dp_hash_node);

-	/* Free percpu memory */
-	free_percpu(p->upcall_stats);
-
 	/* Then destroy it. */
 	ovs_vport_del(p);
 }

@@ -1858,12 +1855,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto err_destroy_portids;
 	}

-	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
-	if (!vport->upcall_stats) {
-		err = -ENOMEM;
-		goto err_destroy_vport;
-	}
-
 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
 	BUG_ON(err < 0);

@@ -1876,8 +1867,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	ovs_notify(&dp_datapath_genl_family, reply, info);
 	return 0;

-err_destroy_vport:
-	ovs_dp_detach_port(vport);
 err_destroy_portids:
 	kfree(rcu_dereference_raw(dp->upcall_portids));
 err_unlock_and_destroy_meters:

@@ -2322,12 +2311,6 @@ restart:
 		goto exit_unlock_free;
 	}

-	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
-	if (!vport->upcall_stats) {
-		err = -ENOMEM;
-		goto exit_unlock_free_vport;
-	}
-
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
 				      OVS_VPORT_CMD_NEW, GFP_KERNEL);

@@ -2345,8 +2328,6 @@ restart:
 	ovs_notify(&dp_vport_genl_family, reply, info);
 	return 0;

-exit_unlock_free_vport:
-	ovs_dp_detach_port(vport);
 exit_unlock_free:
 	ovs_unlock();
 	kfree_skb(reply);
@@ -124,6 +124,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 {
 	struct vport *vport;
 	size_t alloc_size;
+	int err;

 	alloc_size = sizeof(struct vport);
 	if (priv_size) {

@@ -135,17 +136,29 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 	if (!vport)
 		return ERR_PTR(-ENOMEM);

+	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
+	if (!vport->upcall_stats) {
+		err = -ENOMEM;
+		goto err_kfree_vport;
+	}
+
 	vport->dp = parms->dp;
 	vport->port_no = parms->port_no;
 	vport->ops = ops;
 	INIT_HLIST_NODE(&vport->dp_hash_node);

 	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
-		kfree(vport);
-		return ERR_PTR(-EINVAL);
+		err = -EINVAL;
+		goto err_free_percpu;
 	}

 	return vport;
+
+err_free_percpu:
+	free_percpu(vport->upcall_stats);
+err_kfree_vport:
+	kfree(vport);
+	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(ovs_vport_alloc);

@@ -165,6 +178,7 @@ void ovs_vport_free(struct vport *vport)
 	 * it is safe to use raw dereference.
 	 */
 	kfree(rcu_dereference_raw(vport->upcall_portids));
+	free_percpu(vport->upcall_stats);
 	kfree(vport);
 }
 EXPORT_SYMBOL_GPL(ovs_vport_free);
@@ -357,23 +357,23 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
 	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
 	if (p->rate_present) {
 		psched_ratecfg_getrate(&opt.rate, &p->rate);
-		if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
+		if ((p->rate.rate_bytes_ps >= (1ULL << 32)) &&
 		    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
-				      police->params->rate.rate_bytes_ps,
+				      p->rate.rate_bytes_ps,
 				      TCA_POLICE_PAD))
 			goto nla_put_failure;
 	}
 	if (p->peak_present) {
 		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
-		if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
+		if ((p->peak.rate_bytes_ps >= (1ULL << 32)) &&
 		    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
-				      police->params->peak.rate_bytes_ps,
+				      p->peak.rate_bytes_ps,
 				      TCA_POLICE_PAD))
 			goto nla_put_failure;
 	}
 	if (p->pps_present) {
 		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
-				      police->params->ppsrate.rate_pkts_ps,
+				      p->ppsrate.rate_pkts_ps,
 				      TCA_POLICE_PAD))
 			goto nla_put_failure;
 		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
@@ -43,8 +43,6 @@
 #include <net/flow_offload.h>
 #include <net/tc_wrapper.h>

-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
-
 /* The list of all installed classifier types */
 static LIST_HEAD(tcf_proto_base);

@@ -2952,6 +2950,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
 		return PTR_ERR(ops);
 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+		module_put(ops->owner);
 		return -EOPNOTSUPP;
 	}
@@ -309,7 +309,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)

 	if (dev_ingress_queue(dev))
 		q = qdisc_match_from_root(
-			dev_ingress_queue(dev)->qdisc_sleeping,
+			rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
 			handle);
 out:
 	return q;

@@ -328,7 +328,8 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)

 	nq = dev_ingress_queue_rcu(dev);
 	if (nq)
-		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
+		q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
+					  handle);
 out:
 	return q;
 }

@@ -634,8 +635,13 @@ EXPORT_SYMBOL(qdisc_watchdog_init);
 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
 				      u64 delta_ns)
 {
-	if (test_bit(__QDISC_STATE_DEACTIVATED,
-		     &qdisc_root_sleeping(wd->qdisc)->state))
+	bool deactivated;
+
+	rcu_read_lock();
+	deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
+			       &qdisc_root_sleeping(wd->qdisc)->state);
+	rcu_read_unlock();
+	if (deactivated)
 		return;

 	if (hrtimer_is_queued(&wd->timer)) {

@@ -1478,7 +1484,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 			}
 			q = qdisc_leaf(p, clid);
 		} else if (dev_ingress_queue(dev)) {
-			q = dev_ingress_queue(dev)->qdisc_sleeping;
+			q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
 		}
 	} else {
 		q = rtnl_dereference(dev->qdisc);

@@ -1564,7 +1570,7 @@ replay:
 			}
 			q = qdisc_leaf(p, clid);
 		} else if (dev_ingress_queue_create(dev)) {
-			q = dev_ingress_queue(dev)->qdisc_sleeping;
+			q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
 		}
 	} else {
 		q = rtnl_dereference(dev->qdisc);

@@ -1805,8 +1811,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)

 		dev_queue = dev_ingress_queue(dev);
 		if (dev_queue &&
-		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
-				       &q_idx, s_q_idx, false,
+		    tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+				       skb, cb, &q_idx, s_q_idx, false,
 				       tca[TCA_DUMP_INVISIBLE]) < 0)
 			goto done;

@@ -2249,8 +2255,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)

 	dev_queue = dev_ingress_queue(dev);
 	if (dev_queue &&
-	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
-				&t, s_t, false) < 0)
+	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+				skb, tcm, cb, &t, s_t, false) < 0)
 		goto done;

 done:

@@ -2302,7 +2308,9 @@ static struct pernet_operations psched_net_ops = {
 	.exit = psched_net_exit,
 };

+#if IS_ENABLED(CONFIG_RETPOLINE)
 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
+#endif

 static int __init pktsched_init(void)
 {
@@ -201,6 +201,11 @@ out:
 	return NET_XMIT_CN;
 }

+static struct netlink_range_validation fq_pie_q_range = {
+	.min = 1,
+	.max = 1 << 20,
+};
+
 static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
 	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
 	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},

@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
 	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
 	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
 	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
-	[TCA_FQ_PIE_QUANTUM]		= {.type = NLA_U32},
+	[TCA_FQ_PIE_QUANTUM]		=
+		NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
 	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
 	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
 	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},

@@ -373,6 +379,7 @@ static void fq_pie_timer(struct timer_list *t)
 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
 	u32 idx;

+	rcu_read_lock();
 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);

@@ -385,6 +392,7 @@ static void fq_pie_timer(struct timer_list *t)
 		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);

 	spin_unlock(root_lock);
+	rcu_read_unlock();
 }

 static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
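
This is the "fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values" fix: the policy entry gains a {min,max} descriptor via NLA_POLICY_FULL_RANGE(), so a zero or absurd quantum is rejected during netlink validation and never reaches the qdisc. The table-driven shape of that check, sketched in plain C (enum and bounds invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { ATTR_LIMIT, ATTR_QUANTUM, ATTR_MAX };

    struct range_policy {
        uint32_t min;
        uint32_t max;
    };

    /* Analogue of a policy table with NLA_POLICY_FULL_RANGE() entries:
     * bounds are validated centrally, so handlers never see bad values
     * (a quantum of 0 is what this commit stops from getting through).
     */
    static const struct range_policy policy[ATTR_MAX] = {
        [ATTR_LIMIT]   = { .min = 1, .max = 10 * 1024 * 1024 },
        [ATTR_QUANTUM] = { .min = 1, .max = 1 << 20 },
    };

    static bool validate(int attr, uint32_t val)
    {
        return val >= policy[attr].min && val <= policy[attr].max;
    }

    int main(void)
    {
        printf("quantum 0    -> %s\n", validate(ATTR_QUANTUM, 0) ? "ok" : "rejected");
        printf("quantum 1514 -> %s\n", validate(ATTR_QUANTUM, 1514) ? "ok" : "rejected");
        return 0;
    }

Putting the bounds in the policy rather than in the setter keeps every configuration path (create, change, dump round-trip) behind the same check.
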
@@ -648,7 +648,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {

 static struct netdev_queue noop_netdev_queue = {
 	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
-	.qdisc_sleeping	= &noop_qdisc,
+	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
 };

 struct Qdisc noop_qdisc = {

@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL(qdisc_put_unlocked);
 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 			      struct Qdisc *qdisc)
 {
-	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	spinlock_t *root_lock;

 	root_lock = qdisc_lock(oqdisc);

@@ -1112,7 +1112,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	/* ... and graft new one */
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
-	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

 	spin_unlock_bh(root_lock);

@@ -1125,12 +1125,12 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_qdisc_default)
 {
-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	struct Qdisc *qdisc_default = _qdisc_default;

 	if (qdisc) {
 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
-		dev_queue->qdisc_sleeping = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);

 		qdisc_put(qdisc);
 	}

@@ -1154,7 +1154,7 @@ static void attach_one_default_qdisc(struct net_device *dev,

 	if (!netif_is_multiqueue(dev))
 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
-	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 }

 static void attach_default_qdiscs(struct net_device *dev)

@@ -1167,7 +1167,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 	if (!netif_is_multiqueue(dev) ||
 	    dev->priv_flags & IFF_NO_QUEUE) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-		qdisc = txq->qdisc_sleeping;
+		qdisc = rtnl_dereference(txq->qdisc_sleeping);
 		rcu_assign_pointer(dev->qdisc, qdisc);
 		qdisc_refcount_inc(qdisc);
 	} else {

@@ -1186,7 +1186,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
 		dev->priv_flags |= IFF_NO_QUEUE;
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-		qdisc = txq->qdisc_sleeping;
+		qdisc = rtnl_dereference(txq->qdisc_sleeping);
 		rcu_assign_pointer(dev->qdisc, qdisc);
 		qdisc_refcount_inc(qdisc);
 		dev->priv_flags ^= IFF_NO_QUEUE;

@@ -1202,7 +1202,7 @@ static void transition_one_qdisc(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
 				 void *_need_watchdog)
 {
-	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	int *need_watchdog_p = _need_watchdog;

 	if (!(new_qdisc->flags & TCQ_F_BUILTIN))

@@ -1272,7 +1272,7 @@ static void dev_reset_queue(struct net_device *dev,
 	struct Qdisc *qdisc;
 	bool nolock;

-	qdisc = dev_queue->qdisc_sleeping;
+	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	if (!qdisc)
 		return;

@@ -1303,7 +1303,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 		int val;

 		dev_queue = netdev_get_tx_queue(dev, i);
-		q = dev_queue->qdisc_sleeping;
+		q = rtnl_dereference(dev_queue->qdisc_sleeping);

 		root_lock = qdisc_lock(q);
 		spin_lock_bh(root_lock);

@@ -1379,7 +1379,7 @@ EXPORT_SYMBOL(dev_deactivate);
 static int qdisc_change_tx_queue_len(struct net_device *dev,
 				     struct netdev_queue *dev_queue)
 {
-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
 	const struct Qdisc_ops *ops = qdisc->ops;

 	if (ops->change_tx_queue_len)

@@ -1404,7 +1404,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
 	unsigned int i;

 	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
 		/* Only update the default qdiscs we created,
 		 * qdiscs with handles are always hashed.
 		 */

@@ -1412,7 +1412,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
 			qdisc_hash_del(qdisc);
 	}
 	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
-		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
 		if (qdisc != &noop_qdisc && !qdisc->handle)
 			qdisc_hash_add(qdisc, false);
 	}

@@ -1449,7 +1449,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc = _qdisc;

 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
-	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 }

 void dev_init_scheduler(struct net_device *dev)
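
These sch_generic hunks stop treating qdisc_sleeping as a plain pointer: writers publish through rcu_assign_pointer() (a release store after full initialisation) and readers pick it up with rtnl_dereference()/rcu_dereference(). A minimal C11 sketch of the publish/consume discipline those annotations enforce (struct and values invented):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct qdisc {
        int handle;
    };

    static struct qdisc noop = { .handle = 0 };
    static _Atomic(struct qdisc *) sleeping = &noop;

    static void *reader(void *arg)
    {
        (void)arg;
        /* Like rcu_dereference(): an ordered load, so the reader sees
         * the fully initialised object the writer published.
         */
        struct qdisc *q = atomic_load_explicit(&sleeping,
                                               memory_order_acquire);
        printf("handle %d\n", q->handle);
        return NULL;
    }

    int main(void)
    {
        static struct qdisc new_q;
        pthread_t t;

        new_q.handle = 42;  /* initialise fully... */
        /* ...then publish, like rcu_assign_pointer() (release store). */
        atomic_store_explicit(&sleeping, &new_q, memory_order_release);

        pthread_create(&t, NULL, reader, NULL);
        pthread_join(t, NULL);
        return 0;
    }

The kernel annotations additionally let sparse and lockdep verify that every reader holds either the RTNL or an RCU read-side critical section, which is what the timer-path hunks in the pie/red/sfq files below rely on.
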
@@ -141,7 +141,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	 * qdisc totals are added at end.
 	 */
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
 		spin_lock_bh(qdisc_lock(qdisc));

 		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,

@@ -202,7 +202,7 @@ static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
 {
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

-	return dev_queue->qdisc_sleeping;
+	return rtnl_dereference(dev_queue->qdisc_sleeping);
 }

 static unsigned long mq_find(struct Qdisc *sch, u32 classid)

@@ -221,7 +221,7 @@ static int mq_dump_class(struct Qdisc *sch, unsigned long cl,

 	tcm->tcm_parent = TC_H_ROOT;
 	tcm->tcm_handle |= TC_H_MIN(cl);
-	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
 	return 0;
 }

@@ -230,7 +230,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 {
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

-	sch = dev_queue->qdisc_sleeping;
+	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
 	if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
 	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
@@ -557,7 +557,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	 * qdisc totals are added at end.
 	 */
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
 		spin_lock_bh(qdisc_lock(qdisc));

 		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,

@@ -604,7 +604,7 @@ static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
 	if (!dev_queue)
 		return NULL;

-	return dev_queue->qdisc_sleeping;
+	return rtnl_dereference(dev_queue->qdisc_sleeping);
 }

 static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)

@@ -637,7 +637,7 @@ static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
 		tcm->tcm_parent = (tc < 0) ? 0 :
 			TC_H_MAKE(TC_H_MAJ(sch->handle),
 				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
-		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+		tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
 	} else {
 		tcm->tcm_parent = TC_H_ROOT;
 		tcm->tcm_info = 0;

@@ -693,7 +693,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	} else {
 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

-		sch = dev_queue->qdisc_sleeping;
+		sch = rtnl_dereference(dev_queue->qdisc_sleeping);
 		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
 					  &sch->bstats, true) < 0 ||
 		    qdisc_qstats_copy(d, sch) < 0)
@@ -421,8 +421,10 @@ static void pie_timer(struct timer_list *t)
 {
 	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
 	struct Qdisc *sch = q->sch;
-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	spinlock_t *root_lock;

+	rcu_read_lock();
+	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
 	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

@@ -430,6 +432,7 @@ static void pie_timer(struct timer_list *t)
 	if (q->params.tupdate)
 		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
 	spin_unlock(root_lock);
+	rcu_read_unlock();
 }

 static int pie_init(struct Qdisc *sch, struct nlattr *opt,
@@ -321,12 +321,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
 {
 	struct red_sched_data *q = from_timer(q, t, adapt_timer);
 	struct Qdisc *sch = q->sch;
-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	spinlock_t *root_lock;

+	rcu_read_lock();
+	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
 	red_adaptative_algo(&q->parms, &q->vars);
 	mod_timer(&q->adapt_timer, jiffies + HZ/2);
 	spin_unlock(root_lock);
+	rcu_read_unlock();
 }

 static int red_init(struct Qdisc *sch, struct nlattr *opt,
@@ -606,10 +606,12 @@ static void sfq_perturbation(struct timer_list *t)
 {
 	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
 	struct Qdisc *sch = q->sch;
-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	spinlock_t *root_lock;
 	siphash_key_t nkey;

 	get_random_bytes(&nkey, sizeof(nkey));
+	rcu_read_lock();
+	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
 	q->perturbation = nkey;
 	if (!q->filter_list && q->tail)

@@ -618,6 +620,7 @@ static void sfq_perturbation(struct timer_list *t)

 	if (q->perturb_period)
 		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+	rcu_read_unlock();
 }

 static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
@@ -2358,7 +2358,7 @@ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
 	if (!dev_queue)
 		return NULL;

-	return dev_queue->qdisc_sleeping;
+	return rtnl_dereference(dev_queue->qdisc_sleeping);
 }

 static unsigned long taprio_find(struct Qdisc *sch, u32 classid)

@@ -2377,7 +2377,7 @@ static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,

 	tcm->tcm_parent = TC_H_ROOT;
 	tcm->tcm_handle |= TC_H_MIN(cl);
-	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;

 	return 0;
 }

@@ -2389,7 +2389,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 {
 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

-	sch = dev_queue->qdisc_sleeping;
+	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
 	if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
 	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
@@ -297,7 +297,7 @@ restart:
 		struct net_device *slave = qdisc_dev(q);
 		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);

-		if (slave_txq->qdisc_sleeping != q)
+		if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q)
 			continue;
 		if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
 		    !netif_running(slave)) {
@@ -851,6 +851,8 @@ static int smc_llc_add_link_cont(struct smc_link *link,
 	addc_llc->num_rkeys = *num_rkeys_todo;
 	n = *num_rkeys_todo;
 	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
+		while (*buf_pos && !(*buf_pos)->used)
+			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
 		if (!*buf_pos) {
 			addc_llc->num_rkeys = addc_llc->num_rkeys -
 					      *num_rkeys_todo;

@@ -867,8 +869,6 @@ static int smc_llc_add_link_cont(struct smc_link *link,

 		(*num_rkeys_todo)--;
 		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
-		while (*buf_pos && !(*buf_pos)->used)
-			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
 	}
 	addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
 	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
@@ -368,12 +368,12 @@ static void cfg80211_sched_scan_stop_wk(struct work_struct *work)
 	rdev = container_of(work, struct cfg80211_registered_device,
 			   sched_scan_stop_wk);

-	rtnl_lock();
+	wiphy_lock(&rdev->wiphy);
 	list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
 		if (req->nl_owner_dead)
 			cfg80211_stop_sched_scan_req(rdev, req, false);
 	}
-	rtnl_unlock();
+	wiphy_unlock(&rdev->wiphy);
 }

 static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
@@ -10723,6 +10723,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
 		if (!info->attrs[NL80211_ATTR_MLD_ADDR])
 			return -EINVAL;
 		req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
+		if (!is_valid_ether_addr(req.ap_mld_addr))
+			return -EINVAL;
 	}

 	req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
@@ -2440,11 +2440,11 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy)
 	struct wireless_dev *wdev;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);

-	ASSERT_RTNL();
-
+	wiphy_lock(wiphy);
 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
 		if (!reg_wdev_chan_valid(wiphy, wdev))
 			cfg80211_leave(rdev, wdev);
+	wiphy_unlock(wiphy);
 }

 static void reg_check_chans_work(struct work_struct *work)
@@ -1035,6 +1035,7 @@ enum bpf_attach_type {
 	BPF_TRACE_KPROBE_MULTI,
 	BPF_LSM_CGROUP,
 	BPF_STRUCT_OPS,
+	BPF_NETFILTER,
 	__MAX_BPF_ATTACH_TYPE
 };
@@ -117,6 +117,7 @@ static const char * const attach_type_name[] = {
 	[BPF_PERF_EVENT]		= "perf_event",
 	[BPF_TRACE_KPROBE_MULTI]	= "trace_kprobe_multi",
 	[BPF_STRUCT_OPS]		= "struct_ops",
+	[BPF_NETFILTER]			= "netfilter",
 };

 static const char * const link_type_name[] = {

@@ -8712,7 +8713,7 @@ static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("struct_ops+",		STRUCT_OPS, 0, SEC_NONE),
 	SEC_DEF("struct_ops.s+",	STRUCT_OPS, 0, SEC_SLEEPABLE),
 	SEC_DEF("sk_lookup",		SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
-	SEC_DEF("netfilter",		NETFILTER, 0, SEC_NONE),
+	SEC_DEF("netfilter",		NETFILTER, BPF_NETFILTER, SEC_NONE),
 };

 static size_t custom_sec_def_cnt;
@@ -180,7 +180,9 @@ static int probe_prog_load(enum bpf_prog_type prog_type,
 	case BPF_PROG_TYPE_SK_REUSEPORT:
 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
 		break;
+	case BPF_PROG_TYPE_NETFILTER:
+		opts.expected_attach_type = BPF_NETFILTER;
+		break;
 	default:
 		return -EOPNOTSUPP;