Networking fixes for 6.0-rc5, including fixes from the rxrpc, netfilter,
wireless and bluetooth subtrees.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Merge tag 'net-6.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from the rxrpc, netfilter, wireless and bluetooth
  subtrees.

  Current release - regressions:

   - skb: export skb drop reasons to user by TRACE_DEFINE_ENUM

   - bluetooth: fix regression preventing ACL packet transmission

  Current release - new code bugs:

   - dsa: microchip: fix kernel oops on ksz8 switches

   - dsa: qca8k: fix NULL pointer dereference for of_device_get_match_data

  Previous releases - regressions:

   - netfilter: clean up hook list when offload flags check fails

   - wifi: mt76: fix crash in chip reset fail

   - rxrpc: fix ICMP/ICMP6 error handling

   - ice: fix DMA mappings leak

   - i40e: fix kernel crash during module removal

  Previous releases - always broken:

   - ipv6: sr: fix out-of-bounds read when setting HMAC data.

   - tcp: TX zerocopy should not sense pfmemalloc status

   - sch_sfb: don't assume the skb is still around after enqueueing to child

   - netfilter: drop dst references before setting

   - wifi: wilc1000: fix DMA on stack objects

   - rxrpc: fix an insufficiently large sglist in rxkad_verify_packet_2()

   - fec: use a spinlock to guard `fep->ptp_clk_on`

  Misc:

   - usb: qmi_wwan: add Quectel RM520N"

* tag 'net-6.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (50 commits)
  sch_sfb: Also store skb len before calling child enqueue
  net: phy: lan87xx: change interrupt src of link_up to comm_ready
  net/smc: Fix possible access to freed memory in link clear
  net: ethernet: mtk_eth_soc: check max allowed hash in mtk_ppe_check_skb
  net: skb: export skb drop reasons to user by TRACE_DEFINE_ENUM
  net: ethernet: mtk_eth_soc: fix typo in __mtk_foe_entry_clear
  net: dsa: felix: access QSYS_TAG_CONFIG under tas_lock in vsc9959_sched_speed_set
  net: dsa: felix: disable cut-through forwarding for frames oversized for tc-taprio
  net: dsa: felix: tc-taprio intervals smaller than MTU should send at least one packet
  net: usb: qmi_wwan: add Quectel RM520N
  net: dsa: qca8k: fix NULL pointer dereference for of_device_get_match_data
  tcp: fix early ETIMEDOUT after spurious non-SACK RTO
  stmmac: intel: Simplify intel_eth_pci_remove()
  net: mvpp2: debugfs: fix memory leak when using debugfs_lookup()
  ipv6: sr: fix out-of-bounds read when setting HMAC data.
  bonding: accept unsolicited NA message
  bonding: add all node mcast address when slave up
  bonding: use unspecified address if no available link local address
  wifi: use struct_group to copy addresses
  wifi: mac80211_hwsim: check length for virtio packets
  ...
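The headline tracing fix ("skb: export skb drop reasons to user by TRACE_DEFINE_ENUM"), whose diffs appear below in dropreason.h and trace/events/skb.h, works by turning the reason list into an X-macro that is expanded once into TRACE_DEFINE_ENUM() statements and once into a __print_symbolic() name table, so tracing userspace can decode the enum without a runtime string array. The following is a minimal, self-contained sketch of that X-macro pattern in ordinary userspace C, for illustration only; the names (DEFINE_REASONS, R_*) are invented here and are not the kernel's:

/* X-macro sketch: one list, two expansions. */
#include <stdio.h>

#define DEFINE_REASONS(FN, FNe) \
	FN(NOT_SPECIFIED)	\
	FN(NO_SOCKET)		\
	FNe(MAX)

/* First expansion: build the enum (FNe handles the last entry). */
#define FN(r)  R_##r,
#define FNe(r) R_##r
enum reason { DEFINE_REASONS(FN, FNe) };
#undef FN
#undef FNe

/* Second expansion: build a value -> name table, the same trick
 * __print_symbolic() relies on in the kfree_skb tracepoint.
 */
#define FN(r)  { R_##r, #r },
#define FNe(r) { R_##r, #r }
static const struct { enum reason v; const char *name; } names[] = {
	DEFINE_REASONS(FN, FNe)
};
#undef FN
#undef FNe

int main(void)
{
	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%d = %s\n", names[i].v, names[i].name);
	return 0;
}

Because a single list drives both the enum and the string table, the two can never drift apart, which is why the tracepoint no longer needs the kernel-resident drop_reasons[] array.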
commit 26b1224903
@@ -1055,17 +1055,6 @@ The kernel interface functions are as follows:
      first function to change.  Note that this must be called in TASK_RUNNING
      state.
 
- (#) Get reply timestamp::
-
-	bool rxrpc_kernel_get_reply_time(struct socket *sock,
-					 struct rxrpc_call *call,
-					 ktime_t *_ts)
-
-     This allows the timestamp on the first DATA packet of the reply of a
-     client call to be queried, provided that it is still in the Rx ring. If
-     successful, the timestamp will be stored into ``*_ts`` and true will be
-     returned; false will be returned otherwise.
-
  (#) Get remote client epoch::
 
	u32 rxrpc_kernel_get_epoch(struct socket *sock,
@@ -3167,6 +3167,9 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
 found:
 		if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
 			bond_ns_send(slave, &targets[i], &saddr, tags);
+		else
+			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
 		dst_release(dst);
 		kfree(tags);
 }
@@ -3198,12 +3201,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
 	return ret;
 }
 
-static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
 			     struct in6_addr *saddr, struct in6_addr *daddr)
 {
 	int i;
 
-	if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+	/* Ignore NAs that:
+	 * 1. Source address is unspecified address.
+	 * 2. Dest address is neither all-nodes multicast address nor
+	 *    exist on bond interface.
+	 */
+	if (ipv6_addr_any(saddr) ||
+	    (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+	     !bond_has_this_ip6(bond, daddr))) {
 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
 			  __func__, saddr, daddr);
 		return;
@@ -3246,14 +3256,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
 	 * see bond_arp_rcv().
 	 */
 	if (bond_is_active_slave(slave))
-		bond_validate_ns(bond, slave, saddr, daddr);
+		bond_validate_na(bond, slave, saddr, daddr);
 	else if (curr_active_slave &&
 		 time_after(slave_last_rx(bond, curr_active_slave),
 			    curr_active_slave->last_link_up))
-		bond_validate_ns(bond, slave, saddr, daddr);
+		bond_validate_na(bond, slave, saddr, daddr);
 	else if (curr_arp_slave &&
 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
-		bond_validate_ns(bond, slave, saddr, daddr);
+		bond_validate_na(bond, slave, saddr, daddr);
 
 out:
 	return RX_HANDLER_ANOTHER;
@@ -170,6 +170,13 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
 	.exit = ksz8_switch_exit,
 };
 
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+					unsigned int mode,
+					phy_interface_t interface,
+					struct phy_device *phydev, int speed,
+					int duplex, bool tx_pause,
+					bool rx_pause);
+
 static const struct ksz_dev_ops ksz9477_dev_ops = {
 	.setup = ksz9477_setup,
 	.get_port_addr = ksz9477_get_port_addr,
@@ -196,6 +203,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
 	.mdb_del = ksz9477_mdb_del,
 	.change_mtu = ksz9477_change_mtu,
 	.max_mtu = ksz9477_max_mtu,
+	.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
 	.config_cpu_port = ksz9477_config_cpu_port,
 	.enable_stp_addr = ksz9477_enable_stp_addr,
 	.reset = ksz9477_reset_switch,
@@ -230,6 +238,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
 	.mdb_del = ksz9477_mdb_del,
 	.change_mtu = lan937x_change_mtu,
 	.max_mtu = ksz9477_max_mtu,
+	.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
 	.config_cpu_port = lan937x_config_cpu_port,
 	.enable_stp_addr = ksz9477_enable_stp_addr,
 	.reset = lan937x_reset_switch,
@@ -1656,13 +1665,13 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
 	ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
 }
 
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
-				    unsigned int mode,
-				    phy_interface_t interface,
-				    struct phy_device *phydev, int speed,
-				    int duplex, bool tx_pause, bool rx_pause)
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+					unsigned int mode,
+					phy_interface_t interface,
+					struct phy_device *phydev, int speed,
+					int duplex, bool tx_pause,
+					bool rx_pause)
 {
-	struct ksz_device *dev = ds->priv;
 	struct ksz_port *p;
 
 	p = &dev->ports[port];
@@ -1676,6 +1685,15 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
 	ksz_port_set_xmii_speed(dev, port, speed);
 
 	ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
 }
 
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+				    unsigned int mode,
+				    phy_interface_t interface,
+				    struct phy_device *phydev, int speed,
+				    int duplex, bool tx_pause, bool rx_pause)
+{
+	struct ksz_device *dev = ds->priv;
+
+	if (dev->dev_ops->phylink_mac_link_up)
+		dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
@@ -22,6 +22,7 @@
 #define VSC9959_NUM_PORTS		6
 
 #define VSC9959_TAS_GCL_ENTRY_MAX	63
+#define VSC9959_TAS_MIN_GATE_LEN_NS	33
 #define VSC9959_VCAP_POLICER_BASE	63
 #define VSC9959_VCAP_POLICER_MAX	383
 #define VSC9959_SWITCH_PCI_BAR		4
@@ -1478,6 +1479,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
 	mdiobus_free(felix->imdio);
 }
 
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot for which egress scheduling is blocked, but we need to still keep 33 ns
+ * available for one packet to be transmitted, otherwise the port tc will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+	/* Gate always open */
+	if (gate_len_ns == U64_MAX)
+		return U64_MAX;
+
+	return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
+
 /* Extract shortest continuous gate open intervals in ns for each traffic class
  * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
  * considered U64_MAX. If the gate is always closed, it is considered 0.
@@ -1539,6 +1557,65 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
 			min_gate_len[tc] = 0;
 }
 
+/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+				     u32 max_sdu)
+{
+	switch (tc) {
+	case 0:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+				 port);
+		break;
+	case 1:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+				 port);
+		break;
+	case 2:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+				 port);
+		break;
+	case 3:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+				 port);
+		break;
+	case 4:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+				 port);
+		break;
+	case 5:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+				 port);
+		break;
+	case 6:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+				 port);
+		break;
+	case 7:
+		ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+				 port);
+		break;
+	}
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+	switch (tc) {
+	case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+	case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+	case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+	case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+	case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+	case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+	case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+	case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+	default:
+		return 0;
+	}
+}
+
 /* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
  * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
  * values (the default value is 1518). Also, for traffic class windows smaller
@@ -1595,11 +1672,16 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
 
 	vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
 
+	mutex_lock(&ocelot->fwd_domain_lock);
+
 	for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+		u64 remaining_gate_len_ps;
 		u32 max_sdu;
 
-		if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
-		    min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+		remaining_gate_len_ps =
+			vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+		if (remaining_gate_len_ps > needed_bit_time_ps) {
 			/* Setting QMAXSDU_CFG to 0 disables oversized frame
 			 * dropping.
 			 */
@@ -1612,9 +1694,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
 			/* If traffic class doesn't support a full MTU sized
 			 * frame, make sure to enable oversize frame dropping
 			 * for frames larger than the smallest that would fit.
+			 *
+			 * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+			 * controls not only oversized frame dropping, but also
+			 * per-tc static guard band lengths, so it reduces the
+			 * useful gate interval length. Therefore, be careful
+			 * to calculate a guard band (and therefore max_sdu)
+			 * that still leaves 33 ns available in the time slot.
 			 */
-			max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
-					  picos_per_byte);
+			max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
 			/* A TC gate may be completely closed, which is a
 			 * special case where all packets are oversized.
 			 * Any limit smaller than 64 octets accomplishes this
@@ -1637,47 +1725,14 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
 				 max_sdu);
 		}
 
-		/* ocelot_write_rix is a macro that concatenates
-		 * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
-		 * the writes to each traffic class
-		 */
-		switch (tc) {
-		case 0:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
-					 port);
-			break;
-		case 1:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
-					 port);
-			break;
-		case 2:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
-					 port);
-			break;
-		case 3:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
-					 port);
-			break;
-		case 4:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
-					 port);
-			break;
-		case 5:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
-					 port);
-			break;
-		case 6:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
-					 port);
-			break;
-		case 7:
-			ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
-					 port);
-			break;
-		}
+		vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
 	}
 
 	ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+	ocelot->ops->cut_through_fwd(ocelot);
+
+	mutex_unlock(&ocelot->fwd_domain_lock);
 }
 
 static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
@@ -1704,13 +1759,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
 		break;
 	}
 
+	mutex_lock(&ocelot->tas_lock);
+
 	ocelot_rmw_rix(ocelot,
 		       QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
 		       QSYS_TAG_CONFIG_LINK_SPEED_M,
 		       QSYS_TAG_CONFIG, port);
 
-	mutex_lock(&ocelot->tas_lock);
-
 	if (ocelot_port->taprio)
 		vsc9959_tas_guard_bands_update(ocelot, port);
 
@@ -2770,7 +2825,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
 {
 	struct felix *felix = ocelot_to_felix(ocelot);
 	struct dsa_switch *ds = felix->ds;
-	int port, other_port;
+	int tc, port, other_port;
 
 	lockdep_assert_held(&ocelot->fwd_domain_lock);
 
@@ -2814,19 +2869,27 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
 				min_speed = other_ocelot_port->speed;
 		}
 
-		/* Enable cut-through forwarding for all traffic classes. */
-		if (ocelot_port->speed == min_speed)
+		/* Enable cut-through forwarding for all traffic classes that
+		 * don't have oversized dropping enabled, since this check is
+		 * bypassed in cut-through mode.
+		 */
+		if (ocelot_port->speed == min_speed) {
 			val = GENMASK(7, 0);
 
+			for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+				if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+					val &= ~BIT(tc);
+		}
+
 set:
 		tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
 		if (tmp == val)
 			continue;
 
 		dev_dbg(ocelot->dev,
-			"port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+			"port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
 			port, mask, ocelot_port->speed, min_speed,
-			val ? "enabling" : "disabling");
+			val ? "enabling" : "disabling", val);
 
 		ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
 	}
@@ -1889,9 +1889,9 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
 	if (!priv)
 		return -ENOMEM;
 
-	priv->info = of_device_get_match_data(priv->dev);
 	priv->bus = mdiodev->bus;
 	priv->dev = &mdiodev->dev;
+	priv->info = of_device_get_match_data(priv->dev);
 
 	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
 						   GPIOD_ASIS);
@@ -16,6 +16,7 @@
 
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/timecounter.h>
 
@@ -498,6 +499,9 @@ struct bufdesc_ex {
 /* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. */
 #define FEC_QUIRK_WAKEUP_FROM_INT2	(1 << 22)
 
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS		BIT(23)
+
 struct bufdesc_prop {
 	int qid;
 	/* Address of Rx and Tx buffers */
@@ -557,7 +561,6 @@ struct fec_enet_private {
 	struct clk *clk_2x_txclk;
 
 	bool ptp_clk_on;
-	struct mutex ptp_clk_mutex;
 	unsigned int num_tx_queues;
 	unsigned int num_rx_queues;
 
@@ -608,6 +611,7 @@ struct fec_enet_private {
 	struct delayed_work time_keep;
 	struct regulator *reg_phy;
 	struct fec_stop_mode_gpr stop_gpr;
+	struct pm_qos_request pm_qos_req;
 
 	unsigned int tx_align;
 	unsigned int rx_align;
@@ -111,7 +111,8 @@ static const struct fec_devinfo fec_imx6q_info = {
 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
-		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+		  FEC_QUIRK_HAS_PMQOS,
 };
 
 static const struct fec_devinfo fec_mvf600_info = {
@@ -2028,6 +2029,7 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	unsigned long flags;
 	int ret;
 
 	if (enable) {
@@ -2036,15 +2038,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 			return ret;
 
 		if (fep->clk_ptp) {
-			mutex_lock(&fep->ptp_clk_mutex);
+			spin_lock_irqsave(&fep->tmreg_lock, flags);
 			ret = clk_prepare_enable(fep->clk_ptp);
 			if (ret) {
-				mutex_unlock(&fep->ptp_clk_mutex);
+				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 				goto failed_clk_ptp;
 			} else {
 				fep->ptp_clk_on = true;
 			}
-			mutex_unlock(&fep->ptp_clk_mutex);
+			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 		}
 
 		ret = clk_prepare_enable(fep->clk_ref);
@@ -2059,10 +2061,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 	} else {
 		clk_disable_unprepare(fep->clk_enet_out);
 		if (fep->clk_ptp) {
-			mutex_lock(&fep->ptp_clk_mutex);
+			spin_lock_irqsave(&fep->tmreg_lock, flags);
 			clk_disable_unprepare(fep->clk_ptp);
 			fep->ptp_clk_on = false;
-			mutex_unlock(&fep->ptp_clk_mutex);
+			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 		}
 		clk_disable_unprepare(fep->clk_ref);
 		clk_disable_unprepare(fep->clk_2x_txclk);
@@ -2075,10 +2077,10 @@ failed_clk_2x_txclk:
 		clk_disable_unprepare(fep->clk_ref);
 failed_clk_ref:
 	if (fep->clk_ptp) {
-		mutex_lock(&fep->ptp_clk_mutex);
+		spin_lock_irqsave(&fep->tmreg_lock, flags);
 		clk_disable_unprepare(fep->clk_ptp);
 		fep->ptp_clk_on = false;
-		mutex_unlock(&fep->ptp_clk_mutex);
+		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 	}
 failed_clk_ptp:
 	clk_disable_unprepare(fep->clk_enet_out);
@@ -3244,6 +3246,9 @@ fec_enet_open(struct net_device *ndev)
 	if (fep->quirks & FEC_QUIRK_ERR006687)
 		imx6q_cpuidle_fec_irqs_used();
 
+	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+		cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
 	napi_enable(&fep->napi);
 	phy_start(ndev->phydev);
 	netif_tx_start_all_queues(ndev);
@@ -3285,6 +3290,9 @@ fec_enet_close(struct net_device *ndev)
 	fec_enet_update_ethtool_stats(ndev);
 
 	fec_enet_clk_enable(ndev, false);
+	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+		cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 	pm_runtime_mark_last_busy(&fep->pdev->dev);
 	pm_runtime_put_autosuspend(&fep->pdev->dev);
@@ -3907,7 +3915,7 @@ fec_probe(struct platform_device *pdev)
 	}
 
 	fep->ptp_clk_on = false;
-	mutex_init(&fep->ptp_clk_mutex);
+	spin_lock_init(&fep->tmreg_lock);
 
 	/* clk_ref is optional, depends on board */
 	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
@@ -365,21 +365,19 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  */
 static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
-	struct fec_enet_private *adapter =
+	struct fec_enet_private *fep =
 		container_of(ptp, struct fec_enet_private, ptp_caps);
 	u64 ns;
 	unsigned long flags;
 
-	mutex_lock(&adapter->ptp_clk_mutex);
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
 	/* Check the ptp clock */
-	if (!adapter->ptp_clk_on) {
-		mutex_unlock(&adapter->ptp_clk_mutex);
+	if (!fep->ptp_clk_on) {
+		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 		return -EINVAL;
 	}
-	spin_lock_irqsave(&adapter->tmreg_lock, flags);
-	ns = timecounter_read(&adapter->tc);
-	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
-	mutex_unlock(&adapter->ptp_clk_mutex);
+	ns = timecounter_read(&fep->tc);
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 
 	*ts = ns_to_timespec64(ns);
 
@@ -404,10 +402,10 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
 	unsigned long flags;
 	u32 counter;
 
-	mutex_lock(&fep->ptp_clk_mutex);
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
 	/* Check the ptp clock */
 	if (!fep->ptp_clk_on) {
-		mutex_unlock(&fep->ptp_clk_mutex);
+		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 		return -EINVAL;
 	}
 
@@ -417,11 +415,9 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
 	 */
 	counter = ns & fep->cc.mask;
 
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
 	writel(counter, fep->hwp + FEC_ATIME);
 	timecounter_init(&fep->tc, &fep->cc, ns);
 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
-	mutex_unlock(&fep->ptp_clk_mutex);
 	return 0;
 }
 
@@ -518,13 +514,11 @@ static void fec_time_keep(struct work_struct *work)
 	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
 	unsigned long flags;
 
-	mutex_lock(&fep->ptp_clk_mutex);
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
 	if (fep->ptp_clk_on) {
-		spin_lock_irqsave(&fep->tmreg_lock, flags);
 		timecounter_read(&fep->tc);
-		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 	}
-	mutex_unlock(&fep->ptp_clk_mutex);
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 
 	schedule_delayed_work(&fep->time_keep, HZ);
 }
@@ -599,8 +593,6 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
 	}
 	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
 
-	spin_lock_init(&fep->tmreg_lock);
-
 	fec_ptp_start_cyclecounter(ndev);
 
 	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
 			 "Cannot locate client instance close routine\n");
 		return;
 	}
+	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+		dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+		return;
+	}
 	cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
 	clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
 	i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
 				/* Remove failed client instance */
 				clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
 					  &cdev->state);
-				i40e_client_del_instance(pf);
 				return;
 			}
 		}
@@ -6659,6 +6659,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
 			vsi->tc_seid_map[i] = ch->seid;
 		}
 	}
+
+	/* reset to reconfigure TX queue contexts */
+	i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
 	return ret;
 
 err_free:
@@ -3688,7 +3688,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
 	u8 prio;
 
 	/* is DCB enabled at all? */
-	if (vsi->tc_config.numtc == 1)
+	if (vsi->tc_config.numtc == 1 ||
+	    i40e_is_tc_mqprio_enabled(vsi->back))
 		return netdev_pick_tx(netdev, skb, sb_dev);
 
 	prio = skb->priority;
@@ -2877,6 +2877,11 @@ static void iavf_reset_task(struct work_struct *work)
 	int i = 0, err;
 	bool running;
 
+	/* Detach interface to avoid subsequent NDO callbacks */
+	rtnl_lock();
+	netif_device_detach(netdev);
+	rtnl_unlock();
+
 	/* When device is being removed it doesn't make sense to run the reset
 	 * task, just return in such a case.
 	 */
@@ -2884,7 +2889,7 @@ static void iavf_reset_task(struct work_struct *work)
 		if (adapter->state != __IAVF_REMOVE)
 			queue_work(iavf_wq, &adapter->reset_task);
 
-		return;
+		goto reset_finish;
 	}
 
 	while (!mutex_trylock(&adapter->client_lock))
@@ -2954,7 +2959,6 @@ continue_reset:
 
 	if (running) {
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		adapter->link_up = false;
 		iavf_napi_disable_all(adapter);
 	}
@@ -3084,7 +3088,7 @@ continue_reset:
 	mutex_unlock(&adapter->client_lock);
 	mutex_unlock(&adapter->crit_lock);
 
-	return;
+	goto reset_finish;
 reset_err:
 	if (running) {
 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -3095,6 +3099,10 @@ reset_err:
 	mutex_unlock(&adapter->client_lock);
 	mutex_unlock(&adapter->crit_lock);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+reset_finish:
+	rtnl_lock();
+	netif_device_attach(netdev);
+	rtnl_unlock();
 }
 
 /**
@@ -7,18 +7,6 @@
 #include "ice_dcb_lib.h"
 #include "ice_sriov.h"
 
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
-{
-	rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
-	return !!rx_ring->xdp_buf;
-}
-
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
-{
-	rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
-	return !!rx_ring->rx_buf;
-}
-
 /**
  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -519,11 +507,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 					 ring->q_index, ring->q_vector->napi.napi_id);
 
-		kfree(ring->rx_buf);
 		ring->xsk_pool = ice_xsk_pool(ring);
 		if (ring->xsk_pool) {
-			if (!ice_alloc_rx_buf_zc(ring))
-				return -ENOMEM;
 			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
 			ring->rx_buf_len =
@@ -538,8 +523,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 				 ring->q_index);
 		} else {
-			if (!ice_alloc_rx_buf(ring))
-				return -ENOMEM;
 			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
 				/* coverity[check_return] */
 				xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -2898,10 +2898,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			if (xdp_ring_err)
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
 		}
+		/* reallocate Rx queues that are used for zero-copy */
+		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+		if (xdp_ring_err)
+			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
 		if (xdp_ring_err)
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+		/* reallocate Rx queues that were used for zero-copy */
+		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+		if (xdp_ring_err)
+			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
 	} else {
 		/* safe to call even when prog == vsi->xdp_prog as
 		 * dev_xdp_install in net/core/dev.c incremented prog's
@@ -3905,7 +3913,7 @@ static int ice_init_pf(struct ice_pf *pf)
 
 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
 	if (!pf->avail_rxqs) {
-		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+		bitmap_free(pf->avail_txqs);
 		pf->avail_txqs = NULL;
 		return -ENOMEM;
 	}
@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
 	if (err)
 		return err;
+	ice_clean_rx_ring(rx_ring);
 
 	ice_qvec_toggle_napi(vsi, q_vector, false);
 	ice_qp_clean_rings(vsi, q_idx);
@@ -316,6 +317,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	return 0;
 }
 
+/**
+ * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
+ * @rx_ring: Rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try allocating memory and return ENOMEM, if failed to allocate.
+ * If allocation was successful, substitute buffer with allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+					  sizeof(*rx_ring->rx_buf);
+	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+	if (!sw_ring)
+		return -ENOMEM;
+
+	if (pool_present) {
+		kfree(rx_ring->rx_buf);
+		rx_ring->rx_buf = NULL;
+		rx_ring->xdp_buf = sw_ring;
+	} else {
+		kfree(rx_ring->xdp_buf);
+		rx_ring->xdp_buf = NULL;
+		rx_ring->rx_buf = sw_ring;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory, than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+	struct ice_rx_ring *rx_ring;
+	unsigned long q;
+
+	for_each_set_bit(q, vsi->af_xdp_zc_qps,
+			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+		rx_ring = vsi->rx_rings[q];
+		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
 /**
  * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
  * @vsi: Current VSI
@@ -345,11 +402,17 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
 	if (if_running) {
+		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
 		ret = ice_qp_dis(vsi, qid);
 		if (ret) {
 			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
 			goto xsk_pool_if_up;
 		}
+
+		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+		if (ret)
+			goto xsk_pool_if_up;
 	}
 
 	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
@@ -27,6 +27,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
 bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
 #else
 static inline bool
 ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
@@ -72,5 +73,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
 
 static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
 static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+		   bool __always_unused zc)
+{
+	return 0;
+}
 #endif /* CONFIG_XDP_SOCKETS */
 #endif /* !_ICE_XSK_H_ */
@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
 
 void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
 {
-	struct dentry *mvpp2_dir, *mvpp2_root;
+	static struct dentry *mvpp2_root;
+	struct dentry *mvpp2_dir;
 	int ret, i;
 
-	mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
 	if (!mvpp2_root)
 		mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
 
@@ -412,7 +412,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 	if (entry->hash != 0xffff) {
 		ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
 		ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
-							      MTK_FOE_STATE_BIND);
+							      MTK_FOE_STATE_UNBIND);
 		dma_wmb();
 	}
 	entry->hash = 0xffff;
@@ -293,6 +293,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 	if (!ppe)
 		return;
 
+	if (hash > MTK_PPE_HASH_MASK)
+		return;
+
 	now = (u16)jiffies;
 	diff = now - ppe->foe_check_time[hash];
 	if (diff < HZ / 10)
@@ -1136,8 +1136,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
 
 	clk_disable_unprepare(priv->plat->stmmac_clk);
 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
-	pcim_iounmap_regions(pdev, BIT(0));
 }
 
 static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
 	    irq_status == INTSRC_ENERGY_DETECT)
 		return IRQ_HANDLED;
 
-	/* Give PHY some time before MAC starts sending data. This works
-	 * around an issue where network doesn't come up properly.
-	 */
-	if (!(irq_status & INTSRC_LINK_DOWN))
-		phy_queue_state_machine(phydev, msecs_to_jiffies(100));
-	else
-		phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev);
 
 	return IRQ_HANDLED;
 }
@@ -28,12 +28,16 @@
 
 /* Interrupt Source Register */
 #define LAN87XX_INTERRUPT_SOURCE		(0x18)
+#define LAN87XX_INTERRUPT_SOURCE_2		(0x08)
 
 /* Interrupt Mask Register */
 #define LAN87XX_INTERRUPT_MASK			(0x19)
 #define LAN87XX_MASK_LINK_UP			(0x0004)
 #define LAN87XX_MASK_LINK_DOWN			(0x0002)
 
+#define LAN87XX_INTERRUPT_MASK_2		(0x09)
+#define LAN87XX_MASK_COMM_RDY			BIT(10)
+
 /* MISC Control 1 Register */
 #define LAN87XX_CTRL_1				(0x11)
 #define LAN87XX_MASK_RGMII_TXC_DLY_EN		(0x4000)
@@ -424,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
 	int rc, val = 0;
 
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
-		/* unmask all source and clear them before enable */
-		rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
-		rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
-		val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
+		/* clear all interrupt */
 		rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
-	} else {
-		rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
-		if (rc)
+		if (rc < 0)
 			return rc;
 
 		rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+		if (rc < 0)
+			return rc;
+
+		rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+				 PHYACC_ATTR_BANK_MISC,
+				 LAN87XX_INTERRUPT_MASK_2, val);
+		if (rc < 0)
+			return rc;
+
+		rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+				 PHYACC_ATTR_BANK_MISC,
+				 LAN87XX_INTERRUPT_SOURCE_2, 0);
+		if (rc < 0)
+			return rc;
+
+		/* enable link down and comm ready interrupt */
+		val = LAN87XX_MASK_LINK_DOWN;
+		rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+		if (rc < 0)
+			return rc;
+
+		val = LAN87XX_MASK_COMM_RDY;
+		rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+				 PHYACC_ATTR_BANK_MISC,
+				 LAN87XX_INTERRUPT_MASK_2, val);
+	} else {
+		rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+		if (rc < 0)
+			return rc;
+
+		rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+		if (rc < 0)
+			return rc;
+
+		rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+				 PHYACC_ATTR_BANK_MISC,
+				 LAN87XX_INTERRUPT_MASK_2, val);
+		if (rc < 0)
+			return rc;
+
+		rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+				 PHYACC_ATTR_BANK_MISC,
+				 LAN87XX_INTERRUPT_SOURCE_2, 0);
 	}
 
 	return rc < 0 ? rc : 0;
@@ -444,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
 {
 	int irq_status;
 
+	irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+				 PHYACC_ATTR_BANK_MISC,
+				 LAN87XX_INTERRUPT_SOURCE_2, 0);
+	if (irq_status < 0) {
+		phy_error(phydev);
+		return IRQ_NONE;
+	}
+
 	irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
 	if (irq_status < 0) {
 		phy_error(phydev);
@@ -1087,6 +1087,7 @@ static const struct usb_device_id products[] = {
 	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
 	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)},	/* Quectel EM160R-GL */
 	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)},	/* Quectel RM500Q-GL */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)},	/* Quectel RM520N */
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
 	/* Repeat initial/next rate.
 	 * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
 	 * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
-	while (repeat_rate > 0) {
+	while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
 		if (is_legacy(tbl_type.lq_type)) {
 			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
 				ant_toggle_cnt++;
@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
 			cpu_to_le32(new_rate);
 		repeat_rate--;
 		idx++;
-		if (idx >= LINK_QUAL_MAX_RETRY_NUM)
-			goto out;
 	}
 
 	il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
 		repeat_rate--;
 	}
 
-out:
 	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
 
@@ -5060,6 +5060,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
 
 	nlh = nlmsg_hdr(skb);
 	gnlh = nlmsg_data(nlh);
+
+	if (skb->len < nlh->nlmsg_len)
+		return -EINVAL;
+
 	err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
 			    hwsim_genl_policy, NULL);
 	if (err) {
@@ -5102,7 +5106,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work)
 	spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
 
 	skb->data = skb->head;
-	skb_set_tail_pointer(skb, len);
+	skb_reset_tail_pointer(skb);
+	skb_put(skb, len);
 	hwsim_virtio_handle_cmd(skb);
 
 	spin_lock_irqsave(&hwsim_virtio_lock, flags);
@@ -261,7 +261,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
 
 	err = mt7921e_driver_own(dev);
 	if (err)
-		return err;
+		goto out;
 
 	err = mt7921_run_firmware(dev);
 	if (err)
@@ -245,6 +245,7 @@ struct wilc {
 	u8 *rx_buffer;
 	u32 rx_buffer_offset;
 	u8 *tx_buffer;
+	u32 *vmm_table;
 
 	struct txq_handle txq[NQUEUES];
 	int txq_entries;
@@ -28,6 +28,7 @@ struct wilc_sdio {
 	u32 block_size;
 	bool isinit;
 	int has_thrpt_enh3;
+	u8 *cmd53_buf;
 };
 
 struct sdio_cmd52 {
@@ -47,6 +48,7 @@ struct sdio_cmd53 {
 	u32 count:		9;
 	u8 *buffer;
 	u32 block_size;
+	bool use_global_buf;
 };
 
 static const struct wilc_hif_func wilc_hif_sdio;
@@ -91,6 +93,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
 {
 	struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
 	int size, ret;
+	struct wilc_sdio *sdio_priv = wilc->bus_data;
+	u8 *buf = cmd->buffer;
 
 	sdio_claim_host(func);
 
@@ -101,12 +105,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
 	else
 		size = cmd->count;
 
+	if (cmd->use_global_buf) {
+		if (size > sizeof(u32))
+			return -EINVAL;
+
+		buf = sdio_priv->cmd53_buf;
+	}
+
 	if (cmd->read_write) {  /* write */
-		ret = sdio_memcpy_toio(func, cmd->address,
-				       (void *)cmd->buffer, size);
+		if (cmd->use_global_buf)
+			memcpy(buf, cmd->buffer, size);
+
+		ret = sdio_memcpy_toio(func, cmd->address, buf, size);
 	} else {        /* read */
-		ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
-					 cmd->address, size);
+		ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
+
+		if (cmd->use_global_buf)
+			memcpy(cmd->buffer, buf, size);
 	}
 
 	sdio_release_host(func);
@@ -128,6 +143,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
 	if (!sdio_priv)
 		return -ENOMEM;
 
+	sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
+	if (!sdio_priv->cmd53_buf) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
 	ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
 				 &wilc_hif_sdio);
 	if (ret)
@@ -161,6 +182,7 @@ dispose_irq:
 	irq_dispose_mapping(wilc->dev_irq_num);
 	wilc_netdev_cleanup(wilc);
 free:
+	kfree(sdio_priv->cmd53_buf);
 	kfree(sdio_priv);
 	return ret;
 }
@@ -172,6 +194,7 @@ static void wilc_sdio_remove(struct sdio_func *func)
 
 	clk_disable_unprepare(wilc->rtc_clk);
 	wilc_netdev_cleanup(wilc);
+	kfree(sdio_priv->cmd53_buf);
 	kfree(sdio_priv);
 }
 
@@ -375,8 +398,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
 		cmd.address = WILC_SDIO_FBR_DATA_REG;
 		cmd.block_mode = 0;
 		cmd.increment = 1;
-		cmd.count = 4;
+		cmd.count = sizeof(u32);
 		cmd.buffer = (u8 *)&data;
+		cmd.use_global_buf = true;
 		cmd.block_size = sdio_priv->block_size;
 		ret = wilc_sdio_cmd53(wilc, &cmd);
 		if (ret)
@@ -414,6 +438,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 	nblk = size / block_size;
 	nleft = size % block_size;
 
+	cmd.use_global_buf = false;
 	if (nblk > 0) {
 		cmd.block_mode = 1;
 		cmd.increment = 1;
@@ -492,8 +517,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
 		cmd.address = WILC_SDIO_FBR_DATA_REG;
 		cmd.block_mode = 0;
 		cmd.increment = 1;
-		cmd.count = 4;
+		cmd.count = sizeof(u32);
 		cmd.buffer = (u8 *)data;
+		cmd.use_global_buf = true;
 
 		cmd.block_size = sdio_priv->block_size;
 		ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -535,6 +561,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
 	nblk = size / block_size;
 	nleft = size % block_size;
 
+	cmd.use_global_buf = false;
 	if (nblk > 0) {
 		cmd.block_mode = 1;
 		cmd.increment = 1;
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 	int ret = 0;
 	int counter;
 	int timeout;
-	u32 vmm_table[WILC_VMM_TBL_SIZE];
+	u32 *vmm_table = wilc->vmm_table;
 	u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
 	const struct wilc_hif_func *func;
 	int srcu_idx;
@@ -1252,6 +1252,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
 	while ((rqe = wilc_wlan_rxq_remove(wilc)))
 		kfree(rqe);
 
+	kfree(wilc->vmm_table);
+	wilc->vmm_table = NULL;
 	kfree(wilc->rx_buffer);
 	wilc->rx_buffer = NULL;
 	kfree(wilc->tx_buffer);
@@ -1489,6 +1491,14 @@ int wilc_wlan_init(struct net_device *dev)
 		goto fail;
 	}
 
+	if (!wilc->vmm_table)
+		wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
+
+	if (!wilc->vmm_table) {
+		ret = -ENOBUFS;
+		goto fail;
+	}
+
 	if (!wilc->tx_buffer)
 		wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);
 
@@ -1513,7 +1523,8 @@ int wilc_wlan_init(struct net_device *dev)
 	return 0;
 
 fail:
-
+	kfree(wilc->vmm_table);
+	wilc->vmm_table = NULL;
 	kfree(wilc->rx_buffer);
 	wilc->rx_buffer = NULL;
 	kfree(wilc->tx_buffer);
@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
 		unsigned int queue_index;
 
 		xen_unregister_watchers(vif);
-		xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(vif);
 #endif /* CONFIG_DEBUG_FS */
@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev)
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
 	unregister_hotplug_status_watch(be);
+	xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
 	if (be->vif) {
 		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
 		backend_disconnect(be);
@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call)
 	if (call->error == 0) {
 		spin_lock(&vnode->lock);
 		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
-		vnode->locked_at = call->reply_time;
+		vnode->locked_at = call->issue_time;
 		afs_schedule_lock_extension(vnode);
 		spin_unlock(&vnode->lock);
 	}
@@ -131,7 +131,7 @@ bad:
 
 static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
 {
-	return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
+	return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
 }
 
 static void xdr_decode_AFSCallBack(const __be32 **_bp,
@@ -137,7 +137,6 @@ struct afs_call {
 	bool			need_attention;	/* T if RxRPC poked us */
 	bool			async;		/* T if asynchronous */
 	bool			upgrade;	/* T to request service upgrade */
-	bool			have_reply_time; /* T if have got reply_time */
 	bool			intr;		/* T if interruptible */
 	bool			unmarshalling_error; /* T if an unmarshalling error occurred */
 	u16			service_id;	/* Actual service ID (after upgrade) */
@@ -151,7 +150,7 @@ struct afs_call {
 		} __attribute__((packed));
 		__be64		tmp64;
 	};
-	ktime_t			reply_time;	/* Time of first reply packet */
+	ktime_t			issue_time;	/* Time of issue of operation */
 };
 
 struct afs_call_type {
@@ -351,6 +351,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
 	if (call->max_lifespan)
 		rxrpc_kernel_set_max_life(call->net->socket, rxcall,
 					  call->max_lifespan);
+	call->issue_time = ktime_get_real();
 
 	/* send the request */
 	iov[0].iov_base	= call->request;
@@ -501,12 +502,6 @@ static void afs_deliver_to_call(struct afs_call *call)
 			return;
 		}
 
-		if (!call->have_reply_time &&
-		    rxrpc_kernel_get_reply_time(call->net->socket,
-						call->rxcall,
-						&call->reply_time))
-			call->have_reply_time = true;
-
 		ret = call->type->deliver(call);
 		state = READ_ONCE(call->state);
 		if (ret == 0 && call->unmarshalling_error)
@@ -232,8 +232,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
 	struct afs_callback	*cb = &scb->callback;
 	ktime_t cb_expiry;
 
-	cb_expiry = call->reply_time;
-	cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
+	cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
 	cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
 	scb->have_cb	= true;
 	*_bp += xdr_size(x);
@@ -310,9 +310,11 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
 struct ieee80211_hdr {
 	__le16 frame_control;
 	__le16 duration_id;
-	u8 addr1[ETH_ALEN];
-	u8 addr2[ETH_ALEN];
-	u8 addr3[ETH_ALEN];
+	struct_group(addrs,
+		u8 addr1[ETH_ALEN];
+		u8 addr2[ETH_ALEN];
+		u8 addr3[ETH_ALEN];
+	);
 	__le16 seq_ctrl;
 	u8 addr4[ETH_ALEN];
 } __packed __aligned(2);
@@ -2444,6 +2444,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
 	skb_shinfo(skb)->nr_frags = i + 1;
 }
 
+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data with @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc, if page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+					    struct page *page, int off,
+					    int size)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	__skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+	shinfo->nr_frags = i + 1;
+}
+
 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 		     int size, unsigned int truesize);
 
@@ -70,6 +70,7 @@ struct udp_sock {
 	 * For encapsulation sockets.
 	 */
 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+	void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
 	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
 	void (*encap_destroy)(struct sock *sk);
 
@@ -66,8 +66,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
 bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
-bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
-				 ktime_t *);
bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
 void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
 			       unsigned long);
@@ -3,6 +3,73 @@
 #ifndef _LINUX_DROPREASON_H
 #define _LINUX_DROPREASON_H
 
+#define DEFINE_DROP_REASON(FN, FNe)	\
+	FN(NOT_SPECIFIED)		\
+	FN(NO_SOCKET)			\
+	FN(PKT_TOO_SMALL)		\
+	FN(TCP_CSUM)			\
+	FN(SOCKET_FILTER)		\
+	FN(UDP_CSUM)			\
+	FN(NETFILTER_DROP)		\
+	FN(OTHERHOST)			\
+	FN(IP_CSUM)			\
+	FN(IP_INHDR)			\
+	FN(IP_RPFILTER)			\
+	FN(UNICAST_IN_L2_MULTICAST)	\
+	FN(XFRM_POLICY)			\
+	FN(IP_NOPROTO)			\
+	FN(SOCKET_RCVBUFF)		\
+	FN(PROTO_MEM)			\
+	FN(TCP_MD5NOTFOUND)		\
+	FN(TCP_MD5UNEXPECTED)		\
+	FN(TCP_MD5FAILURE)		\
+	FN(SOCKET_BACKLOG)		\
+	FN(TCP_FLAGS)			\
+	FN(TCP_ZEROWINDOW)		\
+	FN(TCP_OLD_DATA)		\
+	FN(TCP_OVERWINDOW)		\
+	FN(TCP_OFOMERGE)		\
+	FN(TCP_RFC7323_PAWS)		\
+	FN(TCP_INVALID_SEQUENCE)	\
+	FN(TCP_RESET)			\
+	FN(TCP_INVALID_SYN)		\
+	FN(TCP_CLOSE)			\
+	FN(TCP_FASTOPEN)		\
+	FN(TCP_OLD_ACK)			\
+	FN(TCP_TOO_OLD_ACK)		\
+	FN(TCP_ACK_UNSENT_DATA)		\
+	FN(TCP_OFO_QUEUE_PRUNE)		\
+	FN(TCP_OFO_DROP)		\
+	FN(IP_OUTNOROUTES)		\
+	FN(BPF_CGROUP_EGRESS)		\
+	FN(IPV6DISABLED)		\
+	FN(NEIGH_CREATEFAIL)		\
+	FN(NEIGH_FAILED)		\
+	FN(NEIGH_QUEUEFULL)		\
+	FN(NEIGH_DEAD)			\
+	FN(TC_EGRESS)			\
+	FN(QDISC_DROP)			\
+	FN(CPU_BACKLOG)			\
+	FN(XDP)				\
+	FN(TC_INGRESS)			\
+	FN(UNHANDLED_PROTO)		\
+	FN(SKB_CSUM)			\
+	FN(SKB_GSO_SEG)			\
+	FN(SKB_UCOPY_FAULT)		\
+	FN(DEV_HDR)			\
+	FN(DEV_READY)			\
+	FN(FULL_RING)			\
+	FN(NOMEM)			\
+	FN(HDR_TRUNC)			\
+	FN(TAP_FILTER)			\
+	FN(TAP_TXFILTER)		\
+	FN(ICMP_CSUM)			\
+	FN(INVALID_PROTO)		\
+	FN(IP_INADDRERRORS)		\
+	FN(IP_INNOROUTES)		\
+	FN(PKT_TOO_BIG)			\
+	FNe(MAX)
+
 /**
  * enum skb_drop_reason - the reasons of skb drops
  *
@@ -53,8 +53,6 @@ struct nf_conntrack_net {
 	/* only used when new connection is allocated: */
 	atomic_t count;
 	unsigned int expect_count;
-	u8 sysctl_auto_assign_helper;
-	bool auto_assign_helper_warned;
 
 	/* only used from work queues, configuration plane, and so on: */
 	unsigned int users4;

@@ -101,7 +101,6 @@ struct netns_ct {
 	u8			sysctl_log_invalid; /* Log invalid packets */
 	u8			sysctl_events;
 	u8			sysctl_acct;
-	u8			sysctl_auto_assign_helper;
 	u8			sysctl_tstamp;
 	u8			sysctl_checksum;

@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net,
 typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
 typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
 					     struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
+					   struct sk_buff *skb,
+					   unsigned int udp_offset);
 typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
 typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
 						    struct list_head *head,

@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg {
 	__u8 encap_type;
 	udp_tunnel_encap_rcv_t encap_rcv;
 	udp_tunnel_encap_err_lookup_t encap_err_lookup;
+	udp_tunnel_encap_err_rcv_t encap_err_rcv;
 	udp_tunnel_encap_destroy_t encap_destroy;
 	udp_tunnel_gro_receive_t gro_receive;
 	udp_tunnel_gro_complete_t gro_complete;

@@ -9,6 +9,15 @@
 #include <linux/netdevice.h>
 #include <linux/tracepoint.h>
 
+#undef FN
+#define FN(reason)	TRACE_DEFINE_ENUM(SKB_DROP_REASON_##reason);
+DEFINE_DROP_REASON(FN, FN)
+
+#undef FN
+#undef FNe
+#define FN(reason)	{ SKB_DROP_REASON_##reason, #reason },
+#define FNe(reason)	{ SKB_DROP_REASON_##reason, #reason }
+
 /*
  * Tracepoint for free an sk_buff:
  */

@@ -35,9 +44,13 @@ TRACE_EVENT(kfree_skb,
 
 	TP_printk("skbaddr=%p protocol=%u location=%p reason: %s",
 		  __entry->skbaddr, __entry->protocol, __entry->location,
-		  drop_reasons[__entry->reason])
+		  __print_symbolic(__entry->reason,
+				   DEFINE_DROP_REASON(FN, FNe)))
 );
 
+#undef FN
+#undef FNe
+
 TRACE_EVENT(consume_skb,
 
 	TP_PROTO(struct sk_buff *skb),

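Aside (illustrative, not part of the patch): with the FN/FNe pair above, DEFINE_DROP_REASON(FN, FNe) expands inside __print_symbolic() to enum/string pairs, roughly:

	__print_symbolic(__entry->reason,
			 { SKB_DROP_REASON_NOT_SPECIFIED, "NOT_SPECIFIED" },
			 { SKB_DROP_REASON_NO_SOCKET, "NO_SOCKET" },
			 /* ... one pair per FN() entry ... */
			 { SKB_DROP_REASON_MAX, "MAX" })

FNe only drops the trailing comma that the last element must not have; this is what lets user space decode the reason field symbolically instead of indexing a kernel-side string table.
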
@@ -3018,12 +3018,6 @@ static const struct hci_init_stage amp_init2[] = {
 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
 static int hci_read_buffer_size_sync(struct hci_dev *hdev)
 {
-	/* Use Read LE Buffer Size V2 if supported */
-	if (hdev->commands[41] & 0x20)
-		return __hci_cmd_sync_status(hdev,
-					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
-					     0, NULL, HCI_CMD_TIMEOUT);
-
 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
 				     0, NULL, HCI_CMD_TIMEOUT);
 }

@@ -3237,6 +3231,12 @@ static const struct hci_init_stage hci_init2[] = {
 /* Read LE Buffer Size */
 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
 {
+	/* Use Read LE Buffer Size V2 if supported */
+	if (hdev->commands[41] & 0x20)
+		return __hci_cmd_sync_status(hdev,
+					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
+					     0, NULL, HCI_CMD_TIMEOUT);
+
 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
 				     0, NULL, HCI_CMD_TIMEOUT);
 }

@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
 			/* - Bridged-and-DNAT'ed traffic doesn't
 			 *   require ip_forwarding. */
 			if (rt->dst.dev == dev) {
+				skb_dst_drop(skb);
 				skb_dst_set(skb, &rt->dst);
 				goto bridged_dnat;
 			}

@@ -413,6 +414,7 @@ bridged_dnat:
 			kfree_skb(skb);
 			return 0;
 		}
+		skb_dst_drop(skb);
 		skb_dst_set_noref(skb, &rt->dst);
 	}

@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
 			kfree_skb(skb);
 			return 0;
 		}
+		skb_dst_drop(skb);
 		skb_dst_set_noref(skb, &rt->dst);
 	}

@@ -1 +0,0 @@
-dropreason_str.c

@@ -5,7 +5,7 @@
 
 obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \
 	 gen_stats.o gen_estimator.o net_namespace.o secure_seq.o \
-	 flow_dissector.o dropreason_str.o
+	 flow_dissector.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o

@@ -40,23 +40,3 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
 obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
 obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
 obj-$(CONFIG_OF)	+= of_net.o
-
-clean-files := dropreason_str.c
-
-quiet_cmd_dropreason_str = GEN     $@
-      cmd_dropreason_str = awk -F ',' 'BEGIN{ print "\#include <net/dropreason.h>\n"; \
-	print "const char * const drop_reasons[] = {" }\
-	/^enum skb_drop/ { dr=1; }\
-	/^\};/ { dr=0; }\
-	/^\tSKB_DROP_REASON_/ {\
-		if (dr) {\
-			sub(/\tSKB_DROP_REASON_/, "", $$1);\
-			printf "\t[SKB_DROP_REASON_%s] = \"%s\",\n", $$1, $$1;\
-		}\
-	}\
-	END{ print "};" }' $< > $@
-
-$(obj)/dropreason_str.c: $(srctree)/include/net/dropreason.h
-	$(call cmd,dropreason_str)
-
-$(obj)/dropreason_str.o: $(obj)/dropreason_str.c

@@ -677,7 +677,7 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
 			page_ref_sub(last_head, refs);
 			refs = 0;
 		}
-		skb_fill_page_desc(skb, frag++, head, start, size);
+		skb_fill_page_desc_noacc(skb, frag++, head, start, size);
 	}
 	if (refs)
 		page_ref_sub(last_head, refs);

@@ -91,7 +91,11 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init;
 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
 EXPORT_SYMBOL(sysctl_max_skb_frags);
 
-/* The array 'drop_reasons' is auto-generated in dropreason_str.c */
+#undef FN
+#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
+const char * const drop_reasons[] = {
+	DEFINE_DROP_REASON(FN, FN)
+};
 EXPORT_SYMBOL(drop_reasons);
 
 /**

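Aside (illustrative, not part of the patch): the same X-macro builds the string table at compile time, expanding to roughly

	const char * const drop_reasons[] = {
		[SKB_DROP_REASON_NOT_SPECIFIED] = "NOT_SPECIFIED",
		[SKB_DROP_REASON_NO_SOCKET] = "NO_SOCKET",
		/* ... */
	};

which replaces the awk-generated dropreason_str.c removed from the Makefile above and keeps the array in step with the enum by construction.
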
@@ -1015,7 +1015,7 @@ new_segment:
 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 	} else {
 		get_page(page);
-		skb_fill_page_desc(skb, i, page, offset, copy);
+		skb_fill_page_desc_noacc(skb, i, page, offset, copy);
 	}
 
 	if (!(flags & MSG_NO_SHARED_FRAGS))

@@ -2513,6 +2513,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp)
 	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
 }
 
+static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
+		/* Hold old state until something *above* high_seq
+		 * is ACKed. For Reno it is MUST to prevent false
+		 * fast retransmits (RFC2582). SACK TCP is safe. */
+		if (!tcp_any_retrans_done(sk))
+			tp->retrans_stamp = 0;
+		return true;
+	}
+	return false;
+}
+
 /* People celebrate: "We love our President!" */
 static bool tcp_try_undo_recovery(struct sock *sk)
 {

@@ -2535,14 +2550,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 	} else if (tp->rack.reo_wnd_persist) {
 		tp->rack.reo_wnd_persist--;
 	}
-	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
-		/* Hold old state until something *above* high_seq
-		 * is ACKed. For Reno it is MUST to prevent false
-		 * fast retransmits (RFC2582). SACK TCP is safe. */
-		if (!tcp_any_retrans_done(sk))
-			tp->retrans_stamp = 0;
+	if (tcp_is_non_sack_preventing_reopen(sk))
 		return true;
-	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tp->is_sack_reneg = 0;
 	return false;

@@ -2578,6 +2587,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
+		if (tcp_is_non_sack_preventing_reopen(sk))
+			return true;
 		if (frto_undo || tcp_is_sack(tp)) {
 			tcp_set_ca_state(sk, TCP_CA_Open);
 			tp->is_sack_reneg = 0;

@@ -783,6 +783,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	 */
 	if (tunnel) {
 		/* ...not for tunnels though: we don't have a sending socket */
+		if (udp_sk(sk)->encap_err_rcv)
+			udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2);
 		goto out;
 	}
 	if (!inet->recverr) {

@@ -72,6 +72,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
 
 	udp_sk(sk)->encap_type = cfg->encap_type;
 	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
+	udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
 	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
 	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
 	udp_sk(sk)->gro_receive = cfg->gro_receive;

@@ -3557,12 +3557,16 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		fallthrough;
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
-		if (dev->flags & IFF_SLAVE)
-			break;
-
 		if (idev && idev->cnf.disable_ipv6)
 			break;
 
+		if (dev->flags & IFF_SLAVE) {
+			if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
+			    dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
+				ipv6_mc_up(idev);
+			break;
+		}
+
 		if (event == NETDEV_UP) {
 			/* restore routes for permanent addresses */
 			addrconf_permanent_addr(net, dev);

@@ -191,6 +191,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
 		goto out_unlock;
 	}
 
+	if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
 	if (hinfo) {
 		err = seg6_hmac_info_del(net, hmackeyid);
 		if (err)

@@ -616,8 +616,11 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	}
 
 	/* Tunnels don't have an application socket: don't pass errors back */
-	if (tunnel)
+	if (tunnel) {
+		if (udp_sk(sk)->encap_err_rcv)
+			udp_sk(sk)->encap_err_rcv(sk, skb, offset);
 		goto out;
+	}
 
 	if (!np->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)

@@ -3420,11 +3420,11 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
 		ieee80211_link_info_change_notify(sdata, &sdata->deflink,
 						  BSS_CHANGED_BSSID);
 		sdata->u.mgd.flags = 0;
 
 		mutex_lock(&sdata->local->mtx);
 		ieee80211_link_release_channel(&sdata->deflink);
-		mutex_unlock(&sdata->local->mtx);
-
 		ieee80211_vif_set_links(sdata, 0);
+		mutex_unlock(&sdata->local->mtx);
 	}
 
 	cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);

@@ -3462,10 +3462,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
 	sdata->u.mgd.flags = 0;
 	sdata->vif.bss_conf.mu_mimo_owner = false;
 
-	mutex_lock(&sdata->local->mtx);
-	ieee80211_link_release_channel(&sdata->deflink);
-	mutex_unlock(&sdata->local->mtx);
-
 	if (status != ASSOC_REJECTED) {
 		struct cfg80211_assoc_failure data = {
 			.timeout = status == ASSOC_TIMEOUT,

@@ -3484,7 +3480,10 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
 		cfg80211_assoc_failure(sdata->dev, &data);
 	}
 
+	mutex_lock(&sdata->local->mtx);
+	ieee80211_link_release_channel(&sdata->deflink);
+	ieee80211_vif_set_links(sdata, 0);
+	mutex_unlock(&sdata->local->mtx);
 
 	kfree(assoc_data);

@@ -6509,6 +6508,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	return 0;
 
  out_err:
 	ieee80211_link_release_channel(&sdata->deflink);
+	ieee80211_vif_set_links(sdata, 0);
 	return err;
 }

@@ -4074,6 +4074,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 		.link_id = -1,
 	};
 	struct tid_ampdu_rx *tid_agg_rx;
+	u8 link_id;
 
 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
 	if (!tid_agg_rx)

@@ -4093,6 +4094,9 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 		};
 		drv_event_callback(rx.local, rx.sdata, &event);
 	}
+	/* FIXME: statistics won't be right with this */
+	link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
+	rx.link = rcu_dereference(sta->sdata->link[link_id]);
 
 	ieee80211_rx_handlers(&rx, &frames);
 }

@@ -351,7 +351,7 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
 	 * FC | A1 | A2 | A3 | SC | [A4] | [QC] */
 	put_unaligned_be16(len_a, &aad[0]);
 	put_unaligned(mask_fc, (__le16 *)&aad[2]);
-	memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);
+	memcpy(&aad[4], &hdr->addrs, 3 * ETH_ALEN);
 
 	/* Mask Seq#, leave Frag# */
 	aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;

@@ -792,7 +792,7 @@ static void bip_aad(struct sk_buff *skb, u8 *aad)
 					IEEE80211_FCTL_MOREDATA);
 	put_unaligned(mask_fc, (__le16 *) &aad[0]);
 	/* A1 || A2 || A3 */
-	memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN);
+	memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN);
 }

@@ -1782,7 +1782,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 		}
 		spin_unlock_bh(&nf_conntrack_expect_lock);
 	}
-	if (!exp)
+	if (!exp && tmpl)
 		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
 
 	/* Other CPU might have obtained a pointer to this object before it was

@@ -2068,10 +2068,6 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
 	if (ct->master || (help && !hlist_empty(&help->expectations)))
 		return;
-
-	rcu_read_lock();
-	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
-	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

@@ -2797,7 +2793,6 @@ int nf_conntrack_init_net(struct net *net)
 	nf_conntrack_acct_pernet_init(net);
 	nf_conntrack_tstamp_pernet_init(net);
 	nf_conntrack_ecache_pernet_init(net);
-	nf_conntrack_helper_pernet_init(net);
 	nf_conntrack_proto_pernet_init(net);
 
 	return 0;

@@ -35,11 +35,6 @@ unsigned int nf_ct_helper_hsize __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
 static unsigned int nf_ct_helper_count __read_mostly;
 
-static bool nf_ct_auto_assign_helper __read_mostly = false;
-module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
-MODULE_PARM_DESC(nf_conntrack_helper,
-		 "Enable automatic conntrack helper assignment (default 0)");
-
 static DEFINE_MUTEX(nf_ct_nat_helpers_mutex);
 static struct list_head nf_ct_nat_helpers __read_mostly;

@@ -51,24 +46,6 @@ static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
 		(__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
 }
 
-static struct nf_conntrack_helper *
-__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
-{
-	struct nf_conntrack_helper *helper;
-	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
-	unsigned int h;
-
-	if (!nf_ct_helper_count)
-		return NULL;
-
-	h = helper_hash(tuple);
-	hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
-		if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
-			return helper;
-	}
-	return NULL;
-}
-
 struct nf_conntrack_helper *
 __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
 {

@@ -209,33 +186,11 @@ nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
 
-static struct nf_conntrack_helper *
-nf_ct_lookup_helper(struct nf_conn *ct, struct net *net)
-{
-	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
-
-	if (!cnet->sysctl_auto_assign_helper) {
-		if (cnet->auto_assign_helper_warned)
-			return NULL;
-		if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple))
-			return NULL;
-		pr_info("nf_conntrack: default automatic helper assignment "
-			"has been turned off for security reasons and CT-based "
-			"firewall rule not found. Use the iptables CT target "
-			"to attach helpers instead.\n");
-		cnet->auto_assign_helper_warned = true;
-		return NULL;
-	}
-
-	return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-}
-
 int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 			      gfp_t flags)
 {
 	struct nf_conntrack_helper *helper = NULL;
 	struct nf_conn_help *help;
-	struct net *net = nf_ct_net(ct);
 
 	/* We already got a helper explicitly attached. The function
 	 * nf_conntrack_alter_reply - in case NAT is in use - asks for looking

@@ -246,23 +201,21 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 	if (test_bit(IPS_HELPER_BIT, &ct->status))
 		return 0;
 
-	if (tmpl != NULL) {
-		help = nfct_help(tmpl);
-		if (help != NULL) {
-			helper = rcu_dereference(help->helper);
-			set_bit(IPS_HELPER_BIT, &ct->status);
-		}
+	if (WARN_ON_ONCE(!tmpl))
+		return 0;
+
+	help = nfct_help(tmpl);
+	if (help != NULL) {
+		helper = rcu_dereference(help->helper);
+		set_bit(IPS_HELPER_BIT, &ct->status);
 	}
 
 	help = nfct_help(ct);
 
 	if (helper == NULL) {
-		helper = nf_ct_lookup_helper(ct, net);
-		if (helper == NULL) {
-			if (help)
-				RCU_INIT_POINTER(help->helper, NULL);
-			return 0;
-		}
+		if (help)
+			RCU_INIT_POINTER(help->helper, NULL);
+		return 0;
 	}
 
 	if (help == NULL) {

@@ -545,19 +498,6 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat)
 }
 EXPORT_SYMBOL_GPL(nf_nat_helper_unregister);
 
-void nf_ct_set_auto_assign_helper_warned(struct net *net)
-{
-	nf_ct_pernet(net)->auto_assign_helper_warned = true;
-}
-EXPORT_SYMBOL_GPL(nf_ct_set_auto_assign_helper_warned);
-
-void nf_conntrack_helper_pernet_init(struct net *net)
-{
-	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
-
-	cnet->sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
-}
-
 int nf_conntrack_helper_init(void)
 {
 	nf_ct_helper_hsize = 1; /* gets rounded up to use one page */

@@ -194,8 +194,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
 
 			/* dcc_ip can be the internal OR external (NAT'ed) IP */
 			tuple = &ct->tuplehash[dir].tuple;
-			if (tuple->src.u3.ip != dcc_ip &&
-			    tuple->dst.u3.ip != dcc_ip) {
+			if ((tuple->src.u3.ip != dcc_ip &&
+			     ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
+			    dcc_port == 0) {
 				net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
 						     &tuple->src.u3.ip,
 						     &dcc_ip, dcc_port);

@@ -2298,11 +2298,6 @@ ctnetlink_create_conntrack(struct net *net,
 			ct->status |= IPS_HELPER;
 			RCU_INIT_POINTER(help->helper, helper);
 		}
-	} else {
-		/* try an implicit helper assignation */
-		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
-		if (err < 0)
-			goto err2;
 	}
 
 	err = ctnetlink_setup_nat(ct, cda);

@@ -561,7 +561,6 @@ enum nf_ct_sysctl_index {
 	NF_SYSCTL_CT_LOG_INVALID,
 	NF_SYSCTL_CT_EXPECT_MAX,
 	NF_SYSCTL_CT_ACCT,
-	NF_SYSCTL_CT_HELPER,
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	NF_SYSCTL_CT_EVENTS,
 #endif

@@ -680,14 +679,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
 		.extra1 	= SYSCTL_ZERO,
 		.extra2 	= SYSCTL_ONE,
 	},
-	[NF_SYSCTL_CT_HELPER] = {
-		.procname	= "nf_conntrack_helper",
-		.maxlen		= sizeof(u8),
-		.mode		= 0644,
-		.proc_handler	= proc_dou8vec_minmax,
-		.extra1 	= SYSCTL_ZERO,
-		.extra2 	= SYSCTL_ONE,
-	},
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	[NF_SYSCTL_CT_EVENTS] = {
 		.procname	= "nf_conntrack_events",

@@ -1100,7 +1091,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
 	table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
 	table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
 	table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
-	table[NF_SYSCTL_CT_HELPER].data = &cnet->sysctl_auto_assign_helper;
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
 #endif

@@ -2166,8 +2166,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
 	chain->flags |= NFT_CHAIN_BASE | flags;
 	basechain->policy = NF_ACCEPT;
 	if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
-	    !nft_chain_offload_support(basechain))
+	    !nft_chain_offload_support(basechain)) {
+		list_splice_init(&basechain->hook_list, &hook->list);
 		return -EOPNOTSUPP;
+	}
 
 	flow_block_init(&basechain->flow_block);

@@ -1089,9 +1089,6 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
 	if (err < 0)
 		goto err_put_helper;
 
-	/* Avoid the bogus warning, helper will be assigned after CT init */
-	nf_ct_set_auto_assign_helper_warned(ctx->net);
-
 	return 0;
 
 err_put_helper:

@@ -982,6 +982,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
 /*
  * peer_event.c
  */
+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
 void rxrpc_error_report(struct sock *);
 void rxrpc_peer_keepalive_worker(struct work_struct *);

@@ -166,7 +166,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
 
 	now = ktime_get_real();
-	max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
+	max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
 
 	spin_lock_bh(&call->lock);

@@ -137,6 +137,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 
 	tuncfg.encap_type = UDP_ENCAP_RXRPC;
 	tuncfg.encap_rcv = rxrpc_input_packet;
+	tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
 	tuncfg.sk_user_data = local;
 	setup_udp_tunnel_sock(net, local->socket, &tuncfg);

@@ -405,6 +406,9 @@ static void rxrpc_local_processor(struct work_struct *work)
 		container_of(work, struct rxrpc_local, processor);
 	bool again;
 
+	if (local->dead)
+		return;
+
 	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
 			  refcount_read(&local->ref), NULL);

@@ -16,18 +16,263 @@
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include <net/ip.h>
+#include <net/icmp.h>
 #include "ar-internal.h"
 
+static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int);
 static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
 static void rxrpc_distribute_error(struct rxrpc_peer *, int,
 				   enum rxrpc_call_completion);
 
 /*
- * Find the peer associated with an ICMP packet.
+ * Find the peer associated with an ICMPv4 packet.
  */
 static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
-						     const struct sk_buff *skb,
+						     struct sk_buff *skb,
+						     unsigned int udp_offset,
+						     unsigned int *info,
 						     struct sockaddr_rxrpc *srx)
 {
+	struct iphdr *ip, *ip0 = ip_hdr(skb);
+	struct icmphdr *icmp = icmp_hdr(skb);
+	struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
+
+	_enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code);
+
+	switch (icmp->type) {
+	case ICMP_DEST_UNREACH:
+		*info = ntohs(icmp->un.frag.mtu);
+		fallthrough;
+	case ICMP_TIME_EXCEEDED:
+	case ICMP_PARAMETERPROB:
+		ip = (struct iphdr *)((void *)icmp + 8);
+		break;
+	default:
+		return NULL;
+	}
+
+	memset(srx, 0, sizeof(*srx));
+	srx->transport_type = local->srx.transport_type;
+	srx->transport_len = local->srx.transport_len;
+	srx->transport.family = local->srx.transport.family;
+
+	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
+	 * versa?
+	 */
+	switch (srx->transport.family) {
+	case AF_INET:
+		srx->transport_len = sizeof(srx->transport.sin);
+		srx->transport.family = AF_INET;
+		srx->transport.sin.sin_port = udp->dest;
+		memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
+		       sizeof(struct in_addr));
+		break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+	case AF_INET6:
+		srx->transport_len = sizeof(srx->transport.sin);
+		srx->transport.family = AF_INET;
+		srx->transport.sin.sin_port = udp->dest;
+		memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
+		       sizeof(struct in_addr));
+		break;
+#endif
+
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+
+	_net("ICMP {%pISp}", &srx->transport);
+	return rxrpc_lookup_peer_rcu(local, srx);
+}
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+/*
+ * Find the peer associated with an ICMPv6 packet.
+ */
+static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local,
+						      struct sk_buff *skb,
+						      unsigned int udp_offset,
+						      unsigned int *info,
+						      struct sockaddr_rxrpc *srx)
+{
+	struct icmp6hdr *icmp = icmp6_hdr(skb);
+	struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb);
+	struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
+
+	_enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code);
+
+	switch (icmp->icmp6_type) {
+	case ICMPV6_DEST_UNREACH:
+		*info = ntohl(icmp->icmp6_mtu);
+		fallthrough;
+	case ICMPV6_PKT_TOOBIG:
+	case ICMPV6_TIME_EXCEED:
+	case ICMPV6_PARAMPROB:
+		ip = (struct ipv6hdr *)((void *)icmp + 8);
+		break;
+	default:
+		return NULL;
+	}
+
+	memset(srx, 0, sizeof(*srx));
+	srx->transport_type = local->srx.transport_type;
+	srx->transport_len = local->srx.transport_len;
+	srx->transport.family = local->srx.transport.family;
+
+	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
+	 * versa?
+	 */
+	switch (srx->transport.family) {
+	case AF_INET:
+		_net("Rx ICMP6 on v4 sock");
+		srx->transport_len = sizeof(srx->transport.sin);
+		srx->transport.family = AF_INET;
+		srx->transport.sin.sin_port = udp->dest;
+		memcpy(&srx->transport.sin.sin_addr,
+		       &ip->daddr.s6_addr32[3], sizeof(struct in_addr));
+		break;
+	case AF_INET6:
+		_net("Rx ICMP6");
+		srx->transport.sin.sin_port = udp->dest;
+		memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr,
+		       sizeof(struct in6_addr));
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+
+	_net("ICMP {%pISp}", &srx->transport);
+	return rxrpc_lookup_peer_rcu(local, srx);
+}
+#endif /* CONFIG_AF_RXRPC_IPV6 */
+
+/*
+ * Handle an error received on the local endpoint as a tunnel.
+ */
+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb,
+			 unsigned int udp_offset)
+{
+	struct sock_extended_err ee;
+	struct sockaddr_rxrpc srx;
+	struct rxrpc_local *local;
+	struct rxrpc_peer *peer;
+	unsigned int info = 0;
+	int err;
+	u8 version = ip_hdr(skb)->version;
+	u8 type = icmp_hdr(skb)->type;
+	u8 code = icmp_hdr(skb)->code;
+
+	rcu_read_lock();
+	local = rcu_dereference_sk_user_data(sk);
+	if (unlikely(!local)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	rxrpc_new_skb(skb, rxrpc_skb_received);
+
+	switch (ip_hdr(skb)->version) {
+	case IPVERSION:
+		peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset,
+						  &info, &srx);
+		break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+	case 6:
+		peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset,
+						   &info, &srx);
+		break;
+#endif
+	default:
+		rcu_read_unlock();
+		return;
+	}
+
+	if (peer && !rxrpc_get_peer_maybe(peer))
+		peer = NULL;
+	if (!peer) {
+		rcu_read_unlock();
+		return;
+	}
+
+	memset(&ee, 0, sizeof(ee));
+
+	switch (version) {
+	case IPVERSION:
+		switch (type) {
+		case ICMP_DEST_UNREACH:
+			switch (code) {
+			case ICMP_FRAG_NEEDED:
+				rxrpc_adjust_mtu(peer, info);
+				rcu_read_unlock();
+				rxrpc_put_peer(peer);
+				return;
+			default:
+				break;
+			}
+
+			err = EHOSTUNREACH;
+			if (code <= NR_ICMP_UNREACH) {
+				/* Might want to do something different with
+				 * non-fatal errors
+				 */
+				//harderr = icmp_err_convert[code].fatal;
+				err = icmp_err_convert[code].errno;
+			}
+			break;
+
+		case ICMP_TIME_EXCEEDED:
+			err = EHOSTUNREACH;
+			break;
+		default:
+			err = EPROTO;
+			break;
+		}
+
+		ee.ee_origin = SO_EE_ORIGIN_ICMP;
+		ee.ee_type = type;
+		ee.ee_code = code;
+		ee.ee_errno = err;
+		break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+	case 6:
+		switch (type) {
+		case ICMPV6_PKT_TOOBIG:
+			rxrpc_adjust_mtu(peer, info);
+			rcu_read_unlock();
+			rxrpc_put_peer(peer);
+			return;
+		}
+
+		icmpv6_err_convert(type, code, &err);
+
+		if (err == EACCES)
+			err = EHOSTUNREACH;
+
+		ee.ee_origin = SO_EE_ORIGIN_ICMP6;
+		ee.ee_type = type;
+		ee.ee_code = code;
+		ee.ee_errno = err;
+		break;
+#endif
+	}
+
+	trace_rxrpc_rx_icmp(peer, &ee, &srx);
+
+	rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR);
+	rcu_read_unlock();
+	rxrpc_put_peer(peer);
+}
+
+/*
+ * Find the peer associated with a local error.
+ */
+static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
+						      const struct sk_buff *skb,
+						      struct sockaddr_rxrpc *srx)
+{
 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

@@ -38,9 +283,6 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
 	srx->transport_len = local->srx.transport_len;
 	srx->transport.family = local->srx.transport.family;
 
-	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
-	 * versa?
-	 */
 	switch (srx->transport.family) {
 	case AF_INET:
 		srx->transport_len = sizeof(srx->transport.sin);

@@ -104,10 +346,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
 /*
  * Handle an MTU/fragmentation problem.
  */
-static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
+static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
 {
-	u32 mtu = serr->ee.ee_info;
-
 	_net("Rx ICMP Fragmentation Needed (%d)", mtu);
 
 	/* wind down the local interface MTU */

@@ -148,7 +388,7 @@ void rxrpc_error_report(struct sock *sk)
 	struct sock_exterr_skb *serr;
 	struct sockaddr_rxrpc srx;
 	struct rxrpc_local *local;
-	struct rxrpc_peer *peer;
+	struct rxrpc_peer *peer = NULL;
 	struct sk_buff *skb;
 
 	rcu_read_lock();

@@ -172,41 +412,20 @@ void rxrpc_error_report(struct sock *sk)
 	}
 	rxrpc_new_skb(skb, rxrpc_skb_received);
 	serr = SKB_EXT_ERR(skb);
 	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
 		_leave("UDP empty message");
 		rcu_read_unlock();
 		rxrpc_free_skb(skb, rxrpc_skb_freed);
 		return;
 	}
 
+	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) {
+		peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
+		if (peer && !rxrpc_get_peer_maybe(peer))
+			peer = NULL;
+		if (peer) {
+			trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
+			rxrpc_store_error(peer, serr);
+		}
+	}
 
-	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
-	if (peer && !rxrpc_get_peer_maybe(peer))
-		peer = NULL;
-	if (!peer) {
-		rcu_read_unlock();
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
-		_leave(" [no peer]");
-		return;
-	}
-
-	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
-
-	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
-	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
-	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
-		rxrpc_adjust_mtu(peer, serr);
-		rcu_read_unlock();
-		rxrpc_free_skb(skb, rxrpc_skb_freed);
-		rxrpc_put_peer(peer);
-		_leave(" [MTU update]");
-		return;
-	}
-
-	rxrpc_store_error(peer, serr);
 	rcu_read_unlock();
 	rxrpc_free_skb(skb, rxrpc_skb_freed);
 	rxrpc_put_peer(peer);
 
 	_leave("");
 }

@@ -771,46 +771,3 @@ call_complete:
 	goto out;
 }
 EXPORT_SYMBOL(rxrpc_kernel_recv_data);
-
-/**
- * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
- * @sock: The socket that the call exists on
- * @call: The call to query
- * @_ts: Where to put the timestamp
- *
- * Retrieve the timestamp from the first DATA packet of the reply if it is
- * in the ring.  Returns true if successful, false if not.
- */
-bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
-				 ktime_t *_ts)
-{
-	struct sk_buff *skb;
-	rxrpc_seq_t hard_ack, top, seq;
-	bool success = false;
-
-	mutex_lock(&call->user_mutex);
-
-	if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
-		goto out;
-
-	hard_ack = call->rx_hard_ack;
-	if (hard_ack != 0)
-		goto out;
-
-	seq = hard_ack + 1;
-	top = smp_load_acquire(&call->rx_top);
-	if (after(seq, top))
-		goto out;
-
-	skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
-	if (!skb)
-		goto out;
-
-	*_ts = skb_get_ktime(skb);
-	success = true;
-
-out:
-	mutex_unlock(&call->user_mutex);
-	return success;
-}
-EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);

@@ -540,7 +540,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
 	 * directly into the target buffer.
 	 */
 	sg = _sg;
-	nsg = skb_shinfo(skb)->nr_frags;
+	nsg = skb_shinfo(skb)->nr_frags + 1;
 	if (nsg <= 4) {
 		nsg = 4;
 	} else {

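Aside (illustrative, not part of the patch): skb_to_sgvec() can consume one scatterlist entry for the skb's linear head in addition to one per page fragment, so sizing the table from nr_frags alone may fall one entry short. A sketch of the sizing rule, with illustrative names:

	unsigned int nsg = skb_shinfo(skb)->nr_frags + 1;	/* head + frags */
	struct scatterlist *sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);

	if (sg) {
		sg_init_table(sg, nsg);
		err = skb_to_sgvec(skb, sg, offset, len);	/* < 0 if nsg was too small */
	}
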
@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
 	}
 }
 
-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
 {
 	u32 sfbhash;
 
-	sfbhash = sfb_hash(skb, 0);
+	sfbhash = cb->hashes[0];
 	if (sfbhash)
 		increment_one_qlen(sfbhash, 0, q);
 
-	sfbhash = sfb_hash(skb, 1);
+	sfbhash = cb->hashes[1];
 	if (sfbhash)
 		increment_one_qlen(sfbhash, 1, q);
 }

@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 {
 
 	struct sfb_sched_data *q = qdisc_priv(sch);
+	unsigned int len = qdisc_pkt_len(skb);
 	struct Qdisc *child = q->qdisc;
 	struct tcf_proto *fl;
+	struct sfb_skb_cb cb;
 	int i;
 	u32 p_min = ~0;
 	u32 minqlen = ~0;

@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 enqueue:
+	memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
 	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		qdisc_qstats_backlog_inc(sch, skb);
+		sch->qstats.backlog += len;
 		sch->q.qlen++;
-		increment_qlen(skb, q);
+		increment_qlen(&cb, q);
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.childdrop++;
 		qdisc_qstats_drop(sch);

@@ -757,6 +757,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 	lnk->lgr = lgr;
 	smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
 	lnk->link_idx = link_idx;
+	lnk->wr_rx_id_compl = 0;
 	smc_ibdev_cnt_inc(lnk);
 	smcr_copy_dev_info_to_link(lnk);
 	atomic_set(&lnk->conn_cnt, 0);

@@ -115,8 +115,10 @@ struct smc_link {
 	dma_addr_t		wr_rx_dma_addr;	/* DMA address of wr_rx_bufs */
 	dma_addr_t		wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/
 	u64			wr_rx_id;	/* seq # of last recv WR */
+	u64			wr_rx_id_compl; /* seq # of last completed WR */
 	u32			wr_rx_cnt;	/* number of WR recv buffers */
 	unsigned long		wr_rx_tstamp;	/* jiffies when last buf rx */
+	wait_queue_head_t	wr_rx_empty_wait; /* wait for RQ empty */
 
 	struct ib_reg_wr	wr_reg;		/* WR register memory region */
 	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */

@@ -454,6 +454,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
 
 	for (i = 0; i < num; i++) {
 		link = wc[i].qp->qp_context;
+		link->wr_rx_id_compl = wc[i].wr_id;
 		if (wc[i].status == IB_WC_SUCCESS) {
 			link->wr_rx_tstamp = jiffies;
 			smc_wr_rx_demultiplex(&wc[i]);

@@ -465,6 +466,8 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
 			case IB_WC_RNR_RETRY_EXC_ERR:
 			case IB_WC_WR_FLUSH_ERR:
 				smcr_link_down_cond_sched(link);
+				if (link->wr_rx_id_compl == link->wr_rx_id)
+					wake_up(&link->wr_rx_empty_wait);
 				break;
 			default:
 				smc_wr_rx_post(link); /* refill WR RX */

@@ -639,6 +642,7 @@ void smc_wr_free_link(struct smc_link *lnk)
 		return;
 	ibdev = lnk->smcibdev->ibdev;
 
+	smc_wr_drain_cq(lnk);
 	smc_wr_wakeup_reg_wait(lnk);
 	smc_wr_wakeup_tx_wait(lnk);

@@ -889,6 +893,7 @@ int smc_wr_create_link(struct smc_link *lnk)
 	atomic_set(&lnk->wr_tx_refcnt, 0);
 	init_waitqueue_head(&lnk->wr_reg_wait);
 	atomic_set(&lnk->wr_reg_refcnt, 0);
+	init_waitqueue_head(&lnk->wr_rx_empty_wait);
 	return rc;
 
 dma_unmap:

@@ -73,6 +73,11 @@ static inline void smc_wr_tx_link_put(struct smc_link *link)
 		wake_up_all(&link->wr_tx_wait);
 }
 
+static inline void smc_wr_drain_cq(struct smc_link *lnk)
+{
+	wait_event(lnk->wr_rx_empty_wait, lnk->wr_rx_id_compl == lnk->wr_rx_id);
+}
+
 static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
 {
 	wake_up_all(&lnk->wr_tx_wait);

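Aside (illustrative, not part of the patch): the drain is a standard wait_event()/wake_up() pairing between the teardown path and the completion handler, sketched here as comments:

	/* Teardown side (smc_wr_free_link):
	 *	smc_wr_drain_cq(lnk);	sleeps until wr_rx_id_compl == wr_rx_id
	 *
	 * Completion side (smc_wr_rx_process_cqes):
	 *	link->wr_rx_id_compl = wc[i].wr_id;
	 *	if (link->wr_rx_id_compl == link->wr_rx_id)
	 *		wake_up(&link->wr_rx_empty_wait);
	 */

so the link is only freed once the last posted receive work request has been seen to complete.
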
@@ -160,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
 
 static int map_get(u64 up_map, int i)
 {
-	return (up_map & (1 << i)) >> i;
+	return (up_map & (1ULL << i)) >> i;
 }
 
 static struct tipc_peer *peer_prev(struct tipc_peer *peer)

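Aside (illustrative, not part of the patch): with a plain int constant the shift is performed in 32-bit arithmetic, so bits 32..63 of the u64 map can never be read back, and i >= 32 is undefined behaviour. For example:

	u64 up_map = 1ULL << 40;			/* bit 40 set */
	int bad  = (up_map & (1 << 40)) >> 40;		/* 32-bit shift: UB, typically 0 */
	int good = (up_map & (1ULL << 40)) >> 40;	/* 1, as intended */
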
@@ -136,7 +136,7 @@ static int ccmp_init_iv_and_aad(const struct ieee80211_hdr *hdr,
 	pos = (u8 *) hdr;
 	aad[0] = pos[0] & 0x8f;
 	aad[1] = pos[1] & 0xc7;
-	memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN);
+	memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN);
 	pos = (u8 *) & hdr->seq_ctrl;
 	aad[20] = pos[0] & 0x0f;
 	aad[21] = 0; /* all bits masked */

@@ -102,26 +102,42 @@ check_for_helper()
 
 	ip netns exec ${netns} conntrack -L -f $family -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
 	if [ $? -ne 0 ] ; then
-		echo "FAIL: ${netns} did not show attached helper $message" 1>&2
-		ret=1
+		if [ $autoassign -eq 0 ] ;then
+			echo "FAIL: ${netns} did not show attached helper $message" 1>&2
+			ret=1
+		else
+			echo "PASS: ${netns} did not show attached helper $message" 1>&2
+		fi
+	else
+		if [ $autoassign -eq 0 ] ;then
+			echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
+		else
+			echo "FAIL: ${netns} connection on port $port has ftp helper attached" 1>&2
+			ret=1
+		fi
 	fi
 
-	echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
 	return 0
 }
 
 test_helper()
 {
 	local port=$1
-	local msg=$2
+	local autoassign=$2
 
+	if [ $autoassign -eq 0 ] ;then
+		msg="set via ruleset"
+	else
+		msg="auto-assign"
+	fi
+
 	sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
 
 	sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
 	sleep 1
 
-	check_for_helper "$ns1" "ip $msg" $port
-	check_for_helper "$ns2" "ip $msg" $port
+	check_for_helper "$ns1" "ip $msg" $port $autoassign
+	check_for_helper "$ns2" "ip $msg" $port $autoassign
 
 	wait

@@ -173,9 +189,9 @@ if [ $? -ne 0 ];then
 	fi
 fi
 
-test_helper 2121 "set via ruleset"
-ip netns exec ${ns1} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
-ip netns exec ${ns2} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
-test_helper 21 "auto-assign"
+test_helper 2121 0
+ip netns exec ${ns1} sysctl -qe 'net.netfilter.nf_conntrack_helper=1'
+ip netns exec ${ns2} sysctl -qe 'net.netfilter.nf_conntrack_helper=1'
+test_helper 21 1
 
 exit $ret