Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 - Fix the build with certain Kconfig combinations for the Chelsio
   inline TLS device, from Rohit Maheshwari and Vinay Kumar Yadav.

 - Fix leak in genetlink, from Cong Wang.

 - Fix out of bounds packet header accesses in seg6, from Ahmed
   Abdelsalam.

 - Two XDP fixes in the ENA driver, from Sameeh Jubran.

 - Use rwsem in device rename instead of a seqcount because this code
   can sleep, from Ahmed S. Darwish.

 - Fix WoL regressions in r8169, from Heiner Kallweit.

 - Fix qed crashes in kdump mode, from Alok Prasad.

 - Fix the callbacks used for certain thermal zones in mlxsw, from Vadim
   Pasternak.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (35 commits)
  net: dsa: lantiq_gswip: fix and improve the unsupported interface error
  mlxsw: core: Use different get_trend() callbacks for different thermal zones
  net: dp83869: Reset return variable if PHY strap is read
  rhashtable: Drop raw RCU deref in nested_table_free
  cxgb4: Use kfree() instead kvfree() where appropriate
  net: qed: fixes crash while running driver in kdump kernel
  vsock/vmci: make vmci_vsock_transport_cb() static
  net: ethtool: Fix comment mentioning typo in IS_ENABLED()
  net: phy: mscc: fix Serdes configuration in vsc8584_config_init
  net: mscc: Fix OF_MDIO config check
  net: marvell: Fix OF_MDIO config check
  net: dp83867: Fix OF_MDIO config check
  net: dp83869: Fix OF_MDIO config check
  net: ethernet: mvneta: fix MVNETA_SKB_HEADROOM alignment
  ethtool: linkinfo: remove an unnecessary NULL check
  net/xdp: use shift instead of 64 bit division
  crypto/chtls:Fix compile error when CONFIG_IPV6 is disabled
  inet_connection_sock: clear inet_num out of destroy helper
  yam: fix possible memory leak in yam_init_driver
  lan743x: Use correct MAC_CR configuration for 1 GBit speed
  ...
Linus Torvalds, 2020-06-07 17:27:45 -07:00
commit af7b480103
43 changed files with 249 additions and 180 deletions

View File

@@ -389,10 +389,6 @@ static inline void copy_hash_init_values(char *key, int digestsize)
}
}
-static const u8 sgl_lengths[20] = {
-0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
-};
/* Number of len fields(8) * size of one addr field */
#define PHYSDSGL_MAX_LEN_SIZE 16

View File

@ -93,8 +93,10 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
struct sock *sk) struct sock *sk)
{ {
struct net_device *ndev = cdev->ports[0]; struct net_device *ndev = cdev->ports[0];
#if IS_ENABLED(CONFIG_IPV6)
struct net_device *temp; struct net_device *temp;
int addr_type; int addr_type;
#endif
switch (sk->sk_family) { switch (sk->sk_family) {
case PF_INET: case PF_INET:
@ -102,19 +104,21 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
return ndev; return ndev;
ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr); ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
break; break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6: case PF_INET6:
addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
if (likely(addr_type == IPV6_ADDR_ANY)) if (likely(addr_type == IPV6_ADDR_ANY))
return ndev; return ndev;
for_each_netdev_rcu(&init_net, temp) { for_each_netdev_rcu(&init_net, temp) {
if (ipv6_chk_addr(&init_net, (struct in6_addr *) if (ipv6_chk_addr(&init_net, (struct in6_addr *)
&sk->sk_v6_rcv_saddr, temp, 1)) { &sk->sk_v6_rcv_saddr, temp, 1)) {
ndev = temp; ndev = temp;
break; break;
}
} }
}
break; break;
#endif
default: default:
return NULL; return NULL;
} }
@ -476,8 +480,10 @@ void chtls_destroy_sock(struct sock *sk)
csk->cdev = NULL; csk->cdev = NULL;
if (sk->sk_family == AF_INET) if (sk->sk_family == AF_INET)
sk->sk_prot = &tcp_prot; sk->sk_prot = &tcp_prot;
#if IS_ENABLED(CONFIG_IPV6)
else else
sk->sk_prot = &tcpv6_prot; sk->sk_prot = &tcpv6_prot;
#endif
sk->sk_prot->destroy(sk); sk->sk_prot->destroy(sk);
} }
@ -629,14 +635,15 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk) int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{ {
struct net_device *ndev; struct net_device *ndev;
#if IS_ENABLED(CONFIG_IPV6)
bool clip_valid = false;
#endif
struct listen_ctx *ctx; struct listen_ctx *ctx;
struct adapter *adap; struct adapter *adap;
struct port_info *pi; struct port_info *pi;
bool clip_valid; int ret = 0;
int stid; int stid;
int ret;
clip_valid = false;
rcu_read_lock(); rcu_read_lock();
ndev = chtls_find_netdev(cdev, sk); ndev = chtls_find_netdev(cdev, sk);
rcu_read_unlock(); rcu_read_unlock();
@ -674,6 +681,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_sport, 0, inet_sk(sk)->inet_sport, 0,
cdev->lldi->rxq_ids[0]); cdev->lldi->rxq_ids[0]);
#if IS_ENABLED(CONFIG_IPV6)
} else { } else {
int addr_type; int addr_type;
@ -689,6 +697,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
&sk->sk_v6_rcv_saddr, &sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_sport, inet_sk(sk)->inet_sport,
cdev->lldi->rxq_ids[0]); cdev->lldi->rxq_ids[0]);
#endif
} }
if (ret > 0) if (ret > 0)
ret = net_xmit_errno(ret); ret = net_xmit_errno(ret);
@ -696,8 +705,10 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
goto del_hash; goto del_hash;
return 0; return 0;
del_hash: del_hash:
#if IS_ENABLED(CONFIG_IPV6)
if (clip_valid) if (clip_valid)
cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1); cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
#endif
listen_hash_del(cdev, sk); listen_hash_del(cdev, sk);
free_stid: free_stid:
cxgb4_free_stid(cdev->tids, stid, sk->sk_family); cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
@ -711,8 +722,6 @@ free_ctx:
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk) void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{ {
struct listen_ctx *listen_ctx; struct listen_ctx *listen_ctx;
struct chtls_sock *csk;
int addr_type = 0;
int stid; int stid;
stid = listen_hash_del(cdev, sk); stid = listen_hash_del(cdev, sk);
@ -725,7 +734,11 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
cxgb4_remove_server(cdev->lldi->ports[0], stid, cxgb4_remove_server(cdev->lldi->ports[0], stid,
cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6); cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET6) { if (sk->sk_family == PF_INET6) {
struct chtls_sock *csk;
int addr_type = 0;
csk = rcu_dereference_sk_user_data(sk); csk = rcu_dereference_sk_user_data(sk);
addr_type = ipv6_addr_type((const struct in6_addr *) addr_type = ipv6_addr_type((const struct in6_addr *)
&sk->sk_v6_rcv_saddr); &sk->sk_v6_rcv_saddr);
@ -733,6 +746,7 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
cxgb4_clip_release(csk->egress_dev, (const u32 *) cxgb4_clip_release(csk->egress_dev, (const u32 *)
&sk->sk_v6_rcv_saddr, 1); &sk->sk_v6_rcv_saddr, 1);
} }
#endif
chtls_disconnect_acceptq(sk); chtls_disconnect_acceptq(sk);
} }
@ -941,9 +955,11 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tp = tcp_sk(sk); tp = tcp_sk(sk);
tcpoptsz = 0; tcpoptsz = 0;
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6) if (sk->sk_family == AF_INET6)
iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr); iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
else else
#endif
iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr); iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
if (req->tcpopt.tstamp) if (req->tcpopt.tstamp)
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4); tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
@ -1091,13 +1107,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req, const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev) struct chtls_dev *cdev)
{ {
struct neighbour *n = NULL;
struct inet_sock *newinet; struct inet_sock *newinet;
const struct iphdr *iph; const struct iphdr *iph;
struct tls_context *ctx; struct tls_context *ctx;
struct net_device *ndev; struct net_device *ndev;
struct chtls_sock *csk; struct chtls_sock *csk;
struct dst_entry *dst; struct dst_entry *dst;
struct neighbour *n;
struct tcp_sock *tp; struct tcp_sock *tp;
struct sock *newsk; struct sock *newsk;
u16 port_id; u16 port_id;
@ -1115,6 +1131,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
goto free_sk; goto free_sk;
n = dst_neigh_lookup(dst, &iph->saddr); n = dst_neigh_lookup(dst, &iph->saddr);
#if IS_ENABLED(CONFIG_IPV6)
} else { } else {
const struct ipv6hdr *ip6h; const struct ipv6hdr *ip6h;
struct flowi6 fl6; struct flowi6 fl6;
@ -1131,6 +1148,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (IS_ERR(dst)) if (IS_ERR(dst))
goto free_sk; goto free_sk;
n = dst_neigh_lookup(dst, &ip6h->saddr); n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
} }
if (!n) if (!n)
goto free_sk; goto free_sk;
@ -1158,6 +1176,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
newinet->inet_daddr = iph->saddr; newinet->inet_daddr = iph->saddr;
newinet->inet_rcv_saddr = iph->daddr; newinet->inet_rcv_saddr = iph->daddr;
newinet->inet_saddr = iph->daddr; newinet->inet_saddr = iph->daddr;
#if IS_ENABLED(CONFIG_IPV6)
} else { } else {
struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk; struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
struct inet_request_sock *treq = inet_rsk(oreq); struct inet_request_sock *treq = inet_rsk(oreq);
@ -1175,6 +1194,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
newinet->inet_opt = NULL; newinet->inet_opt = NULL;
newinet->inet_daddr = LOOPBACK4_IPV6; newinet->inet_daddr = LOOPBACK4_IPV6;
newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_saddr = LOOPBACK4_IPV6;
#endif
} }
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid)); oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
@ -1337,10 +1357,12 @@ static void chtls_pass_accept_request(struct sock *sk,
if (iph->version == 0x4) { if (iph->version == 0x4) {
chtls_set_req_addr(oreq, iph->daddr, iph->saddr); chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
ip_dsfield = ipv4_get_dsfield(iph); ip_dsfield = ipv4_get_dsfield(iph);
#if IS_ENABLED(CONFIG_IPV6)
} else { } else {
inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb)); ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
#endif
} }
if (req->tcpopt.wsf <= 14 && if (req->tcpopt.wsf <= 14 &&
sock_net(sk)->ipv4.sysctl_tcp_window_scaling) { sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {

View File

@@ -608,9 +608,11 @@ static void __init chtls_init_ulp_ops(void)
chtls_cpl_prot.recvmsg = chtls_recvmsg;
chtls_cpl_prot.setsockopt = chtls_setsockopt;
chtls_cpl_prot.getsockopt = chtls_getsockopt;
+#if IS_ENABLED(CONFIG_IPV6)
chtls_cpl_protv6 = chtls_cpl_prot;
chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
&tcpv6_prot, PF_INET6);
+#endif
}
static int __init chtls_register(void)
static int __init chtls_register(void) static int __init chtls_register(void)

View File

@@ -1452,7 +1452,8 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
unsupported:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-dev_err(ds->dev, "Unsupported interface: %d\n", state->interface);
+dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
+phy_modes(state->interface), port);
return;
}

View File

@@ -1079,8 +1079,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
-priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
-QCA8K_NUM_PORTS);
+priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;

View File

@@ -355,7 +355,7 @@ error_unmap_dma:
ena_unmap_tx_buff(xdp_ring, tx_info);
tx_info->xdpf = NULL;
error_drop_packet:
+__free_page(tx_info->xdp_rx_page);
return NETDEV_TX_OK;
}
@@ -1646,11 +1646,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
&next_to_clean);
if (unlikely(!skb)) {
-if (xdp_verdict == XDP_TX) {
+if (xdp_verdict == XDP_TX)
ena_free_rx_page(rx_ring,
&rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
-res_budget--;
-}
for (i = 0; i < ena_rx_ctx.descs; i++) {
rx_ring->free_ids[next_to_clean] =
rx_ring->ena_bufs[i].req_id;
@@ -1658,8 +1656,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
}
-if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
+if (xdp_verdict != XDP_PASS) {
+res_budget--;
continue;
+}
break;
}

View File

@@ -3357,7 +3357,7 @@ static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
adap->sge.egr_sz, adap->sge.blocked_fl);
len += sprintf(buf + len, "\n");
size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
-kvfree(buf);
+kfree(buf);
return size;
}
@@ -3374,12 +3374,12 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
-kvfree(t);
+kfree(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
-kvfree(t);
+kfree(t);
return count;
}

View File

@@ -663,6 +663,7 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
static bool cxgb4_uld_in_use(struct adapter *adap)
{
const struct tid_info *t = &adap->tids;
@@ -670,7 +671,6 @@ static bool cxgb4_uld_in_use(struct adapter *adap)
return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
* @adap: adapter info
* @enable: 1 to enable / 0 to disable ktls settings.

View File

@@ -42,7 +42,6 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
#include <asm/machdep.h>
-#include <net/sch_generic.h>
#include "ucc_geth.h"

View File

@@ -325,7 +325,7 @@
cache_line_size())
/* Driver assumes that the last 3 bits are 0 */
-#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) & ~0x7)
+#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
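
The old expression rounds the computed headroom down to a multiple of 8, while ALIGN() rounds it up, so the reserved space can no longer drop below the larger of NET_SKB_PAD and XDP_PACKET_HEADROOM. A standalone sketch of the two roundings (kernel macro semantics reproduced locally; the headroom value is purely illustrative):

    #include <stdio.h>

    /* Same rounding behaviour as the kernel's ALIGN() for power-of-two a. */
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
    #define MASK_DOWN_8(x)   ((x) & ~0x7)   /* old MVNETA_SKB_HEADROOM rounding */

    int main(void)
    {
        unsigned int headroom = 250;        /* illustrative only */

        /* Rounding down can end up below the requested headroom,
         * rounding up never does: prints "down=248 up=256".
         */
        printf("down=%u up=%u\n", MASK_DOWN_8(headroom), ALIGN_UP(headroom, 8));
        return 0;
    }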

View File

@@ -391,8 +391,7 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
int trip, enum thermal_trend *trend)
{
-struct mlxsw_thermal_module *tz = tzdev->devdata;
-struct mlxsw_thermal *thermal = tz->parent;
+struct mlxsw_thermal *thermal = tzdev->devdata;
if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
@@ -593,6 +592,22 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
return 0;
}
+static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev,
+int trip, enum thermal_trend *trend)
+{
+struct mlxsw_thermal_module *tz = tzdev->devdata;
+struct mlxsw_thermal *thermal = tz->parent;
+if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+return -EINVAL;
+if (tzdev == thermal->tz_highest_dev)
+return 1;
+*trend = THERMAL_TREND_STABLE;
+return 0;
+}
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
@@ -604,7 +619,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
-.get_trend = mlxsw_thermal_trend_get,
+.get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
@@ -643,7 +658,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
-.get_trend = mlxsw_thermal_trend_get,
+.get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,

View File

@@ -985,7 +985,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
break;
case SPEED_1000:
data |= MAC_CR_CFG_H_;
-data |= MAC_CR_CFG_L_;
+data &= ~MAC_CR_CFG_L_;
break;
}
lan743x_csr_write(adapter, MAC_CR, data);

View File

@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
@@ -607,6 +608,9 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
int pos;
int rc;
+if (is_kdump_kernel())
+return 0;
if (IS_VF(p_hwfn->cdev))
return 0;

View File

@@ -32,7 +32,6 @@
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
-#include <linux/crash_dump.h>
#include <linux/types.h>
#include "qed_vf.h"
@@ -41,12 +40,9 @@
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
-#define IS_VF(cdev) (is_kdump_kernel() ? \
-(0) : ((cdev)->b_is_vf))
-#define IS_PF(cdev) (is_kdump_kernel() ? \
-(1) : !((cdev)->b_is_vf))
-#define IS_PF_SRIOV(p_hwfn) (is_kdump_kernel() ? \
-(0) : !!((p_hwfn)->cdev->p_iov_info))
+#define IS_VF(cdev) ((cdev)->b_is_vf)
+#define IS_PF(cdev) (!((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)

View File

@@ -1265,7 +1265,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
-is_vf = is_kdump_kernel() ? false : true;
+is_vf = true;
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)

View File

@@ -3928,7 +3928,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
netdev_reset_queue(tp->dev);
}
-static void rtl8169_hw_reset(struct rtl8169_private *tp)
+static void rtl8169_hw_reset(struct rtl8169_private *tp, bool going_down)
{
/* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_rcu();
@@ -3938,6 +3938,9 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_rx_close(tp);
+if (going_down && tp->dev->wol_enabled)
+goto no_reset;
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
@@ -3959,7 +3962,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
}
rtl_hw_reset(tp);
+no_reset:
rtl8169_tx_clear(tp);
rtl8169_init_ring_indexes(tp);
}
@@ -3972,7 +3975,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_disable(&tp->napi);
netif_stop_queue(dev);
-rtl8169_hw_reset(tp);
+rtl8169_hw_reset(tp, false);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i);
@@ -4637,7 +4640,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
phy_stop(tp->phydev);
napi_disable(&tp->napi);
-rtl8169_hw_reset(tp);
+rtl8169_hw_reset(tp, true);
rtl_pll_power_down(tp);
@@ -4942,8 +4945,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
-rtl8169_hw_reset(tp);
if (system_state == SYSTEM_POWER_OFF) {
if (tp->saved_wolopts) {
rtl_wol_suspend_quirk(tp);

View File

@@ -225,7 +225,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
-return PTR_ERR(dwmac);
+return -ENOMEM;
plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
if (IS_ERR(plat_dat))

View File

@@ -987,9 +987,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (geneve->collect_md) {
info = skb_tunnel_info(skb);
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
-err = -EINVAL;
netdev_dbg(dev, "no tunnel metadata\n");
-goto tx_error;
+dev_kfree_skb(skb);
+dev->stats.tx_dropped++;
+return NETDEV_TX_OK;
}
} else {
info = &geneve->info;
@@ -1006,7 +1007,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!err))
return NETDEV_TX_OK;
-tx_error:
dev_kfree_skb(skb);
if (err == -ELOOP)

View File

@@ -1133,6 +1133,7 @@ static int __init yam_init_driver(void)
err = register_netdev(dev);
if (err) {
printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name);
+free_netdev(dev);
goto error;
}
yam_devs[i] = dev;

View File

@@ -488,7 +488,7 @@ static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
return 0;
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static int dp83867_of_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;

View File

@@ -176,7 +176,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
return 0;
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static int dp83869_of_init(struct phy_device *phydev)
{
struct dp83869_private *dp83869 = phydev->priv;
@@ -218,10 +218,13 @@ static int dp83869_of_init(struct phy_device *phydev)
ret = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_STRAP_STS1);
if (ret < 0)
return ret;
if (ret & DP83869_STRAP_MIRROR_ENABLED)
dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN;
else
dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS;
+ret = 0;
}
if (of_property_read_u32(of_node, "rx-fifo-depth",

View File

@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
-#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/linkmode.h>
@@ -34,7 +33,6 @@ struct fixed_mdio_bus {
struct fixed_phy {
int addr;
struct phy_device *phydev;
-seqcount_t seqcount;
struct fixed_phy_status status;
bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
@@ -80,19 +78,17 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
list_for_each_entry(fp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
struct fixed_phy_status state;
-int s;
-do {
-s = read_seqcount_begin(&fp->seqcount);
fp->status.link = !fp->no_carrier;
/* Issue callback if user registered it. */
if (fp->link_update)
fp->link_update(fp->phydev->attached_dev,
&fp->status);
/* Check the GPIO for change in status */
fixed_phy_update(fp);
state = fp->status;
-} while (read_seqcount_retry(&fp->seqcount, s));
return swphy_read_reg(reg_num, &state);
}
@@ -150,8 +146,6 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
if (!fp)
return -ENOMEM;
-seqcount_init(&fp->seqcount);
if (irq != PHY_POLL)
fmb->mii_bus->irq[phy_addr] = irq;

View File

@@ -429,7 +429,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev)
return marvell_config_aneg(phydev);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
/* Set and/or override some configuration registers based on the
* marvell,reg-init property stored in the of_node for the phydev.
*

View File

@@ -764,6 +764,7 @@ EXPORT_SYMBOL(mdiobus_scan);
static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
{
+preempt_disable();
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->transfers);
@@ -778,6 +779,7 @@ static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
u64_stats_inc(&stats->writes);
out:
u64_stats_update_end(&stats->syncp);
+preempt_enable();
}
/**

View File

@@ -374,7 +374,7 @@ struct vsc8531_private {
#endif
};
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
struct vsc8531_edge_rate_table {
u32 vddmac;
u32 slowdown[8];

View File

@@ -98,7 +98,7 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
},
};
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static const struct vsc8531_edge_rate_table edge_table[] = {
{MSCC_VDDMAC_3300, { 0, 2, 4, 7, 10, 17, 29, 53} },
{MSCC_VDDMAC_2500, { 0, 3, 6, 10, 14, 23, 37, 63} },
@@ -382,7 +382,7 @@ out_unlock:
mutex_unlock(&phydev->lock);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
u32 vdd, sd;
@@ -1396,7 +1396,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
-PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
@@ -1405,7 +1405,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 1000Base-X */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
-PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);

View File

@@ -67,5 +67,5 @@ static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
{
return -EOPNOTSUPP;
}
-#endif /* IS_ENABLED(ETHTOOL_NETLINK) */
+#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */

View File

@@ -3,33 +3,36 @@
#define _LINUX_U64_STATS_SYNC_H
/*
-* To properly implement 64bits network statistics on 32bit and 64bit hosts,
-* we provide a synchronization point, that is a noop on 64bit or UP kernels.
+* Protect against 64-bit values tearing on 32-bit architectures. This is
+* typically used for statistics read/update in different subsystems.
*
* Key points :
-* 1) Use a seqcount on SMP 32bits, with low overhead.
-* 2) Whole thing is a noop on 64bit arches or UP kernels.
-* 3) Write side must ensure mutual exclusion or one seqcount update could
+*
+* - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
+* - The whole thing is a no-op on 64-bit architectures.
+*
+* Usage constraints:
+*
+* 1) Write side must ensure mutual exclusion, or one seqcount update could
* be lost, thus blocking readers forever.
-* If this synchronization point is not a mutex, but a spinlock or
-* spinlock_bh() or disable_bh() :
-* 3.1) Write side should not sleep.
-* 3.2) Write side should not allow preemption.
-* 3.3) If applicable, interrupts should be disabled.
+*
+* 2) Write side must disable preemption, or a seqcount reader can preempt the
+* writer and also spin forever.
+*
+* 3) Write side must use the _irqsave() variant if other writers, or a reader,
+* can be invoked from an IRQ context.
*
* 4) If reader fetches several counters, there is no guarantee the whole values
-* are consistent (remember point 1) : this is a noop on 64bit arches anyway)
+* are consistent w.r.t. each other (remember point #2: seqcounts are not
+* used for 64bit architectures).
*
-* 5) readers are allowed to sleep or be preempted/interrupted : They perform
-* pure reads. But if they have to fetch many values, it's better to not allow
-* preemptions/interruptions to avoid many retries.
+* 5) Readers are allowed to sleep or be preempted/interrupted: they perform
+* pure reads.
*
-* 6) If counter might be written by an interrupt, readers should block interrupts.
-* (On UP, there is no seqcount_t protection, a reader allowing interrupts could
-* read partial values)
-*
-* 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
-* u64_stats_fetch_retry_irq() helpers
+* 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
+* might be updated from a hardirq or softirq context (remember point #1:
+* seqcounts are not used for UP kernels). 32-bit UP stat readers could read
+* corrupted 64-bit values otherwise.
*
* Usage :
*
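
The rest of this header (not shown in the hunk) keeps its own usage example; as a quick illustration of the constraints listed above, a hypothetical stats pair could be updated and read like this (sketch only; the struct and function names are invented, the u64_stats helpers are the real ones):

    #include <linux/u64_stats_sync.h>

    struct foo_stats {
        u64 rx_packets;
        u64 rx_bytes;
        struct u64_stats_sync syncp;    /* u64_stats_init(&s->syncp) at setup time */
    };

    /* Writer: called with preemption disabled (e.g. from NAPI/softirq) and
     * serialized against other writers, per points #1 and #2 above.
     */
    static void foo_stats_update(struct foo_stats *s, unsigned int len)
    {
        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        s->rx_bytes += len;
        u64_stats_update_end(&s->syncp);
    }

    /* Reader: may sleep, simply retries if it raced with a writer (point #5);
     * the _irq variants are used because the writer runs from softirq context
     * (point #6).
     */
    static void foo_stats_read(struct foo_stats *s, u64 *packets, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin_irq(&s->syncp);
            *packets = s->rx_packets;
            *bytes = s->rx_bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }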

View File

@@ -292,7 +292,6 @@ static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
/* The below has to be done to allow calling inet_csk_destroy_sock */
sock_set_flag(sk, SOCK_DEAD);
percpu_counter_inc(sk->sk_prot->orphan_count);
-inet_sk(sk)->inet_num = 0;
}
void inet_csk_destroy_sock(struct sock *sk);

View File

@@ -57,7 +57,7 @@ extern void seg6_iptunnel_exit(void);
extern int seg6_local_init(void);
extern void seg6_local_exit(void);
-extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len);
+extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto);
extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);

View File

@@ -63,13 +63,22 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT)
#endif
+static inline union nested_table *nested_table_top(
+const struct bucket_table *tbl)
+{
+/* The top-level bucket entry does not need RCU protection
+* because it's set at the same time as tbl->nest.
+*/
+return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
+}
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
const unsigned int len = 1 << shift;
unsigned int i;
-ntbl = rcu_dereference_raw(ntbl->table);
+ntbl = rcu_dereference_protected(ntbl->table, 1);
if (!ntbl)
return;
@@ -89,7 +98,7 @@ static void nested_bucket_table_free(const struct bucket_table *tbl)
union nested_table *ntbl;
unsigned int i;
-ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ntbl = nested_table_top(tbl);
for (i = 0; i < len; i++)
nested_table_free(ntbl + i, size);
@@ -1173,7 +1182,7 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int subhash = hash;
union nested_table *ntbl;
-ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ntbl = nested_table_top(tbl);
ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
subhash >>= tbl->nest;
@@ -1213,7 +1222,7 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
-ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ntbl = nested_table_top(tbl);
hash >>= tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
size <= (1 << shift));

View File

@ -79,6 +79,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/socket.h> #include <linux/socket.h>
@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id = NR_CPUS; static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq; static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net) static inline void dev_base_seq_inc(struct net *net)
{ {
@ -998,33 +999,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id);
* @net: network namespace * @net: network namespace
* @name: a pointer to the buffer where the name will be stored. * @name: a pointer to the buffer where the name will be stored.
* @ifindex: the ifindex of the interface to get the name from. * @ifindex: the ifindex of the interface to get the name from.
*
* The use of raw_seqcount_begin() and cond_resched() before
* retrying is required as we want to give the writers a chance
* to complete when CONFIG_PREEMPTION is not set.
*/ */
int netdev_get_name(struct net *net, char *name, int ifindex) int netdev_get_name(struct net *net, char *name, int ifindex)
{ {
struct net_device *dev; struct net_device *dev;
unsigned int seq; int ret;
retry: down_read(&devnet_rename_sem);
seq = raw_seqcount_begin(&devnet_rename_seq);
rcu_read_lock(); rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex); dev = dev_get_by_index_rcu(net, ifindex);
if (!dev) { if (!dev) {
rcu_read_unlock(); ret = -ENODEV;
return -ENODEV; goto out;
} }
strcpy(name, dev->name); strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
cond_resched();
goto retry;
}
return 0; ret = 0;
out:
rcu_read_unlock();
up_read(&devnet_rename_sem);
return ret;
} }
/** /**
@ -1296,10 +1292,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
return -EBUSY; return -EBUSY;
write_seqcount_begin(&devnet_rename_seq); down_write(&devnet_rename_sem);
if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
write_seqcount_end(&devnet_rename_seq); up_write(&devnet_rename_sem);
return 0; return 0;
} }
@ -1307,7 +1303,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
err = dev_get_valid_name(net, dev, newname); err = dev_get_valid_name(net, dev, newname);
if (err < 0) { if (err < 0) {
write_seqcount_end(&devnet_rename_seq); up_write(&devnet_rename_sem);
return err; return err;
} }
@ -1322,11 +1318,11 @@ rollback:
if (ret) { if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ); memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type; dev->name_assign_type = old_assign_type;
write_seqcount_end(&devnet_rename_seq); up_write(&devnet_rename_sem);
return ret; return ret;
} }
write_seqcount_end(&devnet_rename_seq); up_write(&devnet_rename_sem);
netdev_adjacent_rename_links(dev, oldname); netdev_adjacent_rename_links(dev, oldname);
@ -1347,7 +1343,7 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */ /* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) { if (err >= 0) {
err = ret; err = ret;
write_seqcount_begin(&devnet_rename_seq); down_write(&devnet_rename_sem);
memcpy(dev->name, oldname, IFNAMSIZ); memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type; dev->name_assign_type = old_assign_type;

View File

@@ -5050,7 +5050,7 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
int err;
struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
-if (!seg6_validate_srh(srh, len))
+if (!seg6_validate_srh(srh, len, false))
return -EINVAL;
switch (type) {

View File

@@ -140,8 +140,7 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
ret = __ethtool_get_link_ksettings(dev, &ksettings);
if (ret < 0) {
-if (info)
-GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
+GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
goto out_ops;
}
lsettings = &ksettings.base;

View File

@@ -902,6 +902,7 @@ void inet_csk_prepare_forced_close(struct sock *sk)
bh_unlock_sock(sk);
sock_put(sk);
inet_csk_prepare_for_destroy_sock(sk);
+inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

View File

@@ -493,7 +493,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)
opt->srcrt;
-if (!seg6_validate_srh(srh, optlen))
+if (!seg6_validate_srh(srh, optlen, false))
goto sticky_done;
break;
}

View File

@@ -25,7 +25,7 @@
#include <net/seg6_hmac.h>
#endif
-bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
+bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
{
unsigned int tlv_offset;
int max_last_entry;
@@ -37,13 +37,17 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
if (((srh->hdrlen + 1) << 3) != len)
return false;
-max_last_entry = (srh->hdrlen / 2) - 1;
-if (srh->first_segment > max_last_entry)
-return false;
-if (srh->segments_left > srh->first_segment + 1)
-return false;
+if (!reduced && srh->segments_left > srh->first_segment) {
+return false;
+} else {
+max_last_entry = (srh->hdrlen / 2) - 1;
+if (srh->first_segment > max_last_entry)
+return false;
+if (srh->segments_left > srh->first_segment + 1)
+return false;
+}
tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4);

View File

@@ -426,7 +426,7 @@ static int seg6_build_state(struct net *net, struct nlattr *nla,
}
/* verify that SRH is consistent */
-if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo)))
+if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo), false))
return -EINVAL;
newts = lwtunnel_state_alloc(tuninfo_len + sizeof(*slwt));

View File

@@ -87,7 +87,7 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
*/
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-if (!seg6_validate_srh(srh, len))
+if (!seg6_validate_srh(srh, len, true))
return NULL;
return srh;
@@ -495,7 +495,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
return false;
srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
-if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))
+if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3, true))
return false;
srh_state->valid = true;
@@ -670,7 +670,7 @@ static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt)
if (len < sizeof(*srh) + sizeof(struct in6_addr))
return -EINVAL;
-if (!seg6_validate_srh(srh, len))
+if (!seg6_validate_srh(srh, len, false))
return -EINVAL;
slwt->srh = kmemdup(srh, len, GFP_KERNEL);

View File

@ -513,15 +513,58 @@ static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
kfree(attrbuf); kfree(attrbuf);
} }
static int genl_lock_start(struct netlink_callback *cb) struct genl_start_context {
const struct genl_family *family;
struct nlmsghdr *nlh;
struct netlink_ext_ack *extack;
const struct genl_ops *ops;
int hdrlen;
};
static int genl_start(struct netlink_callback *cb)
{ {
const struct genl_ops *ops = genl_dumpit_info(cb)->ops; struct genl_start_context *ctx = cb->data;
const struct genl_ops *ops = ctx->ops;
struct genl_dumpit_info *info;
struct nlattr **attrs = NULL;
int rc = 0; int rc = 0;
if (ops->validate & GENL_DONT_VALIDATE_DUMP)
goto no_attrs;
if (ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
return -EINVAL;
attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
ops, ctx->hdrlen,
GENL_DONT_VALIDATE_DUMP_STRICT,
true);
if (IS_ERR(attrs))
return PTR_ERR(attrs);
no_attrs:
info = genl_dumpit_info_alloc();
if (!info) {
kfree(attrs);
return -ENOMEM;
}
info->family = ctx->family;
info->ops = ops;
info->attrs = attrs;
cb->data = info;
if (ops->start) { if (ops->start) {
genl_lock(); if (!ctx->family->parallel_ops)
genl_lock();
rc = ops->start(cb); rc = ops->start(cb);
genl_unlock(); if (!ctx->family->parallel_ops)
genl_unlock();
}
if (rc) {
kfree(attrs);
genl_dumpit_info_free(info);
cb->data = NULL;
} }
return rc; return rc;
} }
@ -548,7 +591,7 @@ static int genl_lock_done(struct netlink_callback *cb)
rc = ops->done(cb); rc = ops->done(cb);
genl_unlock(); genl_unlock();
} }
genl_family_rcv_msg_attrs_free(info->family, info->attrs, true); genl_family_rcv_msg_attrs_free(info->family, info->attrs, false);
genl_dumpit_info_free(info); genl_dumpit_info_free(info);
return rc; return rc;
} }
@ -573,43 +616,23 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
const struct genl_ops *ops, const struct genl_ops *ops,
int hdrlen, struct net *net) int hdrlen, struct net *net)
{ {
struct genl_dumpit_info *info; struct genl_start_context ctx;
struct nlattr **attrs = NULL;
int err; int err;
if (!ops->dumpit) if (!ops->dumpit)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (ops->validate & GENL_DONT_VALIDATE_DUMP) ctx.family = family;
goto no_attrs; ctx.nlh = nlh;
ctx.extack = extack;
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) ctx.ops = ops;
return -EINVAL; ctx.hdrlen = hdrlen;
attrs = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
ops, hdrlen,
GENL_DONT_VALIDATE_DUMP_STRICT,
true);
if (IS_ERR(attrs))
return PTR_ERR(attrs);
no_attrs:
/* Allocate dumpit info. It is going to be freed by done() callback. */
info = genl_dumpit_info_alloc();
if (!info) {
genl_family_rcv_msg_attrs_free(family, attrs, true);
return -ENOMEM;
}
info->family = family;
info->ops = ops;
info->attrs = attrs;
if (!family->parallel_ops) { if (!family->parallel_ops) {
struct netlink_dump_control c = { struct netlink_dump_control c = {
.module = family->module, .module = family->module,
.data = info, .data = &ctx,
.start = genl_lock_start, .start = genl_start,
.dump = genl_lock_dumpit, .dump = genl_lock_dumpit,
.done = genl_lock_done, .done = genl_lock_done,
}; };
@ -617,12 +640,11 @@ no_attrs:
genl_unlock(); genl_unlock();
err = __netlink_dump_start(net->genl_sock, skb, nlh, &c); err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
genl_lock(); genl_lock();
} else { } else {
struct netlink_dump_control c = { struct netlink_dump_control c = {
.module = family->module, .module = family->module,
.data = info, .data = &ctx,
.start = ops->start, .start = genl_start,
.dump = ops->dumpit, .dump = ops->dumpit,
.done = genl_parallel_done, .done = genl_parallel_done,
}; };

View File

@@ -221,7 +221,7 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
total = accounted;
-while (rem) {
+do {
if (!skb || skb->len >= mss) {
skb = tipc_buf_acquire(mss, GFP_KERNEL);
if (unlikely(!skb))
@@ -245,7 +245,7 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
skb_put(skb, cpy);
rem -= cpy;
total += msg_blocks(hdr) - curr;
-}
+} while (rem);
return total - accounted;
}
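
Turning the while loop into do/while guarantees the body runs at least once, so even a zero-length append still acquires and queues one message buffer instead of falling straight through. A minimal standalone illustration of that control-flow difference (invented helper, not the TIPC code itself):

    #include <stdio.h>

    /* Count how many loop iterations run for a given remaining length. */
    static int passes(int rem, int use_do_while)
    {
        int n = 0;

        if (use_do_while) {
            do {            /* body always runs at least once */
                n++;
                rem = 0;
            } while (rem);
        } else {
            while (rem) {   /* body is skipped entirely when rem == 0 */
                n++;
                rem = 0;
            }
        }
        return n;
    }

    int main(void)
    {
        /* Prints "while: 0, do/while: 1" for a zero-length request. */
        printf("while: %d, do/while: %d\n", passes(0, 0), passes(0, 1));
        return 0;
    }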

View File

@@ -2055,7 +2055,7 @@ static bool vmci_check_transport(struct vsock_sock *vsk)
return vsk->transport == &vmci_transport;
}
-void vmci_vsock_transport_cb(bool is_host)
+static void vmci_vsock_transport_cb(bool is_host)
{
int features;

View File

@@ -336,7 +336,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if ((addr + size) < addr)
return -EINVAL;
-npgs = div_u64(size, PAGE_SIZE);
+npgs = size >> PAGE_SHIFT;
if (npgs > U32_MAX)
return -EINVAL;
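
Because PAGE_SIZE is 1 << PAGE_SHIFT, the right shift computes the same page count as div_u64(size, PAGE_SIZE) while avoiding a 64-bit division helper on 32-bit builds. A small standalone check of that equivalence (illustrative values; the page shift is hard-coded here only for the demo):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12
    #define DEMO_PAGE_SIZE  (1ULL << DEMO_PAGE_SHIFT)

    int main(void)
    {
        uint64_t size = 5 * DEMO_PAGE_SIZE + 123;   /* arbitrary test value */

        /* For a power-of-two page size, shifting right by the page shift and
         * dividing by the page size truncate to the same page count.
         */
        assert((size >> DEMO_PAGE_SHIFT) == size / DEMO_PAGE_SIZE);
        printf("npgs = %llu\n", (unsigned long long)(size >> DEMO_PAGE_SHIFT));
        return 0;
    }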