Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net

Pull in bug fixes from 'net' tree for the merge window.

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 1bab8d4c48
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -649,6 +649,12 @@ M:	Lino Sanfilippo <LinoSanfilippo@gmx.de>
 S:	Maintained
 F:	drivers/net/ethernet/alacritech/*
 
+FORCEDETH GIGABIT ETHERNET DRIVER
+M:	Rain River <rain.1986.08.12@gmail.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/nvidia/*
+
 ALCATEL SPEEDTOUCH USB DRIVER
 M:	Duncan Sands <duncan.sands@free.fr>
 L:	linux-usb@vger.kernel.org
@@ -17673,7 +17679,7 @@ F:	Documentation/ABI/testing/sysfs-hypervisor-xen
 
 XEN NETWORK BACKEND DRIVER
 M:	Wei Liu <wei.liu@kernel.org>
-M:	Paul Durrant <paul.durrant@citrix.com>
+M:	Paul Durrant <paul@xen.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 S:	Supported
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -824,7 +824,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 		above_thresh =
 			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
 						     ENA_TX_WAKEUP_THRESH);
-		if (netif_tx_queue_stopped(txq) && above_thresh) {
+		if (netif_tx_queue_stopped(txq) && above_thresh &&
+		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
 			netif_tx_wake_queue(txq);
 			u64_stats_update_begin(&tx_ring->syncp);
 			tx_ring->tx_stats.queue_wakeup++;
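Why the extra condition: the fix closes a race where a late TX completion could re-wake a queue that the down path had just stopped, letting the stack queue packets onto rings being torn down. A minimal userspace sketch of the pattern, assuming nothing from the driver (all names here are illustrative, not the ena API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative flags only; the driver uses netif_* queue state
	 * and an ENA_FLAG_DEV_UP bit in adapter->flags. */
	static atomic_bool queue_stopped = true;
	static atomic_bool dev_up = false;

	/* TX-completion path: wake the queue only while the device is up,
	 * so a completion racing with teardown cannot restart transmission. */
	static void clean_tx_irq(void)
	{
		if (atomic_load(&queue_stopped) && atomic_load(&dev_up)) {
			atomic_store(&queue_stopped, false);
			printf("queue woken\n");
		} else {
			printf("wakeup suppressed (device down)\n");
		}
	}

	int main(void)
	{
		clean_tx_irq();               /* device down: suppressed */
		atomic_store(&dev_up, true);
		clean_tx_irq();               /* device up: queue woken */
		return 0;
	}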
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4713,10 +4713,12 @@ int stmmac_suspend(struct device *dev)
 	if (!ndev || !netif_running(ndev))
 		return 0;
 
-	phylink_stop(priv->phylink);
-
 	mutex_lock(&priv->lock);
 
+	rtnl_lock();
+	phylink_stop(priv->phylink);
+	rtnl_unlock();
+
 	netif_device_detach(ndev);
 	stmmac_stop_all_queues(priv);
@@ -4820,9 +4822,11 @@ int stmmac_resume(struct device *dev)
 
 	stmmac_start_all_queues(priv);
 
-	mutex_unlock(&priv->lock);
-
+	rtnl_lock();
 	phylink_start(priv->phylink);
+	rtnl_unlock();
+
+	mutex_unlock(&priv->lock);
 
 	return 0;
 }
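Context for the locking change: phylink_start() and phylink_stop() are documented to require the RTNL lock, so the fix brackets them with rtnl_lock()/rtnl_unlock() rather than calling them bare from the PM path, where RTNL is not held. The contract can be sketched in plain C (a toy analogy of the kernel's ASSERT_RTNL convention; names are invented):

	#include <assert.h>
	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
	static bool rtnl_held;

	static void rtnl_lock(void)   { pthread_mutex_lock(&rtnl); rtnl_held = true; }
	static void rtnl_unlock(void) { rtnl_held = false; pthread_mutex_unlock(&rtnl); }

	/* Callee with a locking contract: it asserts rather than locking
	 * itself, so callers must provide the lock (as stmmac now does). */
	static void phylink_stop_like(void)
	{
		assert(rtnl_held);
		/* ... stop the PHY state machine ... */
	}

	int main(void)
	{
		rtnl_lock();
		phylink_stop_like();	/* OK: contract satisfied */
		rtnl_unlock();
		return 0;
	}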
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-			queue->rx.rsp_cons = ++cons;
+			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
 			kfree_skb(nskb);
 			return ~0U;
 		}
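The subtlety: when xennet_fill_frags() bails out, some responses have already been moved onto the local sk_buff_head 'list', and the consumer index must skip those as well or they get replayed as if unprocessed. A toy calculation of the fixed expression (values are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int cons = 10;	/* last response consumed */
		unsigned int queued = 3;	/* responses parked on 'list' */

		/* old (buggy): rsp_cons = ++cons          -> 11, replays 3 slots
		 * new (fixed): rsp_cons = ++cons + queued -> 14 */
		unsigned int rsp_cons = ++cons + queued;
		printf("resume at %u\n", rsp_cons);
		return 0;
	}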
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q);
 static inline void qdisc_run(struct Qdisc *q)
 {
 	if (qdisc_run_begin(q)) {
-		__qdisc_run(q);
+		/* NOLOCK qdisc must check 'state' under the qdisc seqlock
+		 * to avoid racing with dev_qdisc_reset()
+		 */
+		if (!(q->flags & TCQ_F_NOLOCK) ||
+		    likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			__qdisc_run(q);
 		qdisc_run_end(q);
 	}
 }
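What the new comment is guarding against: qdisc_run_begin() takes the NOLOCK qdisc's seqlock, and only a DEACTIVATED test made inside that critical section cannot race with deactivation and dev_qdisc_reset(); the __dev_xmit_skb() hunk below applies the same discipline. A rough interleaving sketch (my annotation, not kernel code):

	/* CPU A (xmit path)              CPU B (dev_deactivate)
	 *
	 * qdisc_run_begin(q)  <- seqlock taken
	 *                                 set __QDISC_STATE_DEACTIVATED
	 *                                 wait for seqlock holders
	 * test DEACTIVATED bit -> either sees the bit and skips
	 *                         __qdisc_run(), or B waits until A
	 *                         calls qdisc_run_end()
	 * qdisc_run_end(q)
	 *
	 * Testing the bit *before* qdisc_run_begin() leaves a window
	 * where A passes the check, B resets the qdisc, and A then
	 * runs the dequeue on a freshly reset ring.
	 */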
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -21,7 +21,8 @@ struct sock_reuseport {
 	unsigned int		synq_overflow_ts;
 	/* ID stays the same even after the size of socks[] grows. */
 	unsigned int		reuseport_id;
-	bool			bind_inany;
+	unsigned int		bind_inany:1;
+	unsigned int		has_conns:1;
 	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
 	struct sock		*socks[0];	/* array of sock pointers */
 };
@@ -37,6 +38,23 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
 extern int reuseport_detach_prog(struct sock *sk);
 
+static inline bool reuseport_has_conns(struct sock *sk, bool set)
+{
+	struct sock_reuseport *reuse;
+	bool ret = false;
+
+	rcu_read_lock();
+	reuse = rcu_dereference(sk->sk_reuseport_cb);
+	if (reuse) {
+		if (set)
+			reuse->has_conns = 1;
+		ret = reuse->has_conns;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 int reuseport_get_id(struct sock_reuseport *reuse);
 
 #endif  /* _SOCK_REUSEPORT_H */
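reuseport_has_conns() is deliberately both setter and getter: the connect() paths latch the flag with set == true, and the UDP lookup reads it with set == false to decide whether hash-based group selection can be trusted. A minimal userspace analogy, with a bare pointer standing in for the RCU-protected sk->sk_reuseport_cb (names invented):

	#include <stdbool.h>
	#include <stdio.h>

	struct group { unsigned int has_conns:1; };

	/* The kernel version dereferences sk->sk_reuseport_cb under RCU;
	 * here a plain pointer stands in for it. */
	static bool group_has_conns(struct group *g, bool set)
	{
		bool ret = false;

		if (g) {
			if (set)
				g->has_conns = 1;
			ret = g->has_conns;
		}
		return ret;
	}

	int main(void)
	{
		struct group g = { 0 };

		printf("%d\n", group_has_conns(&g, false)); /* 0: none yet */
		group_has_conns(&g, true);                  /* connect() latches it */
		printf("%d\n", group_has_conns(&g, false)); /* 1: stays set */
		return 0;
	}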
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3467,18 +3467,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	qdisc_calculate_pkt_len(skb, q);
 
 	if (q->flags & TCQ_F_NOLOCK) {
-		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-			__qdisc_drop(skb, &to_free);
-			rc = NET_XMIT_DROP;
-		} else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
-			   qdisc_run_begin(q)) {
+		if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+		    qdisc_run_begin(q)) {
+			if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state))) {
+				__qdisc_drop(skb, &to_free);
+				rc = NET_XMIT_DROP;
+				goto end_run;
+			}
 			qdisc_bstats_cpu_update(q, skb);
 
+			rc = NET_XMIT_SUCCESS;
 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
 				__qdisc_run(q);
 
+end_run:
 			qdisc_run_end(q);
-			rc = NET_XMIT_SUCCESS;
 		} else {
 			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 			qdisc_run(q);
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 select_by_hash:
 		/* no bpf or invalid bpf result: fall back to hash usage */
-		if (!sk2)
-			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
+		if (!sk2) {
+			int i, j;
+
+			i = j = reciprocal_scale(hash, socks);
+			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+				i++;
+				if (i >= reuse->num_socks)
+					i = 0;
+				if (i == j)
+					goto out;
+			}
+			sk2 = reuse->socks[i];
+		}
 	}
 
 out:
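The new fallback probes linearly from the hashed slot, skips connected (TCP_ESTABLISHED) sockets, and gives up after one full wrap. A self-contained simulation of that scan, with reciprocal_scale() reduced to a modulo and made-up socket states:

	#include <stdio.h>

	#define TCP_ESTABLISHED	1
	#define TCP_CLOSE	7

	/* Pick a socket starting at the hashed index, skipping connected
	 * ones; return -1 if every socket in the group is connected. */
	static int select_sock(const int *state, int num, unsigned int hash)
	{
		int i, j;

		i = j = hash % num;	/* reciprocal_scale() in the kernel */
		while (state[i] == TCP_ESTABLISHED) {
			i++;
			if (i >= num)
				i = 0;
			if (i == j)
				return -1;
		}
		return i;
	}

	int main(void)
	{
		int state[4] = { TCP_ESTABLISHED, TCP_ESTABLISHED,
				 TCP_CLOSE, TCP_ESTABLISHED };

		/* hash lands on slot 1 (connected): scan settles on slot 2 */
		printf("chose slot %d\n", select_sock(state, 4, 5));
		return 0;
	}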
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -644,6 +644,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
 	tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
 	tag_ops = dsa_tag_driver_get(tag_protocol);
 	if (IS_ERR(tag_ops)) {
+		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+			return -EPROBE_DEFER;
 		dev_warn(ds->dev, "No tagger for this switch\n");
 		return PTR_ERR(tag_ops);
 	}
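dsa_tag_driver_get() reports a tagger that is not (yet) registered as -ENOPROTOOPT; translating exactly that case into -EPROBE_DEFER asks the driver core to retry the switch probe after the tagger module loads, instead of failing it permanently. The shape of the pattern (the error values are the kernel's, the functions are stand-ins):

	#include <errno.h>
	#include <stdio.h>

	#define EPROBE_DEFER 517	/* kernel-internal: retry this probe later */

	/* Pretend tagger lookup: -ENOPROTOOPT means "not registered (yet)". */
	static int tag_driver_get(int available)
	{
		return available ? 0 : -ENOPROTOOPT;
	}

	static int parse_cpu_port(int tagger_available)
	{
		int err = tag_driver_get(tagger_available);

		if (err == -ENOPROTOOPT)
			return -EPROBE_DEFER;	/* module may simply load later */
		return err;
	}

	int main(void)
	{
		printf("%d\n", parse_cpu_port(0));	/* -517: deferred, not failed */
		printf("%d\n", parse_cpu_port(1));	/* 0: tagger found */
		return 0;
	}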
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <net/route.h>
 #include <net/tcp_states.h>
+#include <net/sock_reuseport.h>
 
 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	}
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 	inet->inet_id = jiffies;
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -423,12 +423,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			badness = score;
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -27,6 +27,7 @@
 #include <net/ip6_route.h>
 #include <net/tcp_states.h>
 #include <net/dsfield.h>
+#include <net/sock_reuseport.h>
 
 #include <linux/errqueue.h>
 #include <linux/uaccess.h>
@@ -254,6 +255,7 @@ ipv4_connected:
 		goto out;
 	}
 
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	if (unlikely(!tun_info ||
 		     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 		     ip_tunnel_info_af(tun_info) != AF_INET6))
-		return -EINVAL;
+		goto tx_err;
 
 	key = &tun_info->key;
 	memset(&fl6, 0, sizeof(fl6));
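An ndo_start_xmit() handler must not return a negative errno: the core interprets the return value as a NETDEV_TX_* code, and bailing out early with -EINVAL also leaks the skb. Funneling the failure through the existing tx_err label frees the buffer, bumps the error counter, and returns NETDEV_TX_OK. The single-exit idiom in standalone form (a sketch, not the tunnel code):

	#include <stdio.h>
	#include <stdlib.h>

	#define NETDEV_TX_OK 0

	struct skb { char *data; };

	static int tx_errors;

	/* Single error exit: every failure funnels through tx_err so the
	 * buffer is always consumed, mirroring the ip6erspan fix. */
	static int start_xmit(struct skb *skb, int valid)
	{
		if (!valid)
			goto tx_err;

		/* ... build headers, hand to lower layer ... */
		free(skb->data);
		free(skb);
		return NETDEV_TX_OK;

	tx_err:
		tx_errors++;
		free(skb->data);
		free(skb);
		return NETDEV_TX_OK;	/* consumed either way, never -EINVAL */
	}

	int main(void)
	{
		struct skb *s = malloc(sizeof(*s));
		int rc;

		s->data = malloc(16);
		rc = start_xmit(s, 0);
		printf("%d (tx_errors=%d)\n", rc, tx_errors);
		return 0;
	}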
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -158,13 +158,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			result = sk;
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -42,7 +42,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
 static const char *const rds_ib_stat_names[] = {
 	"ib_connect_raced",
 	"ib_listen_closed_stale",
-	"s_ib_evt_handler_call",
+	"ib_evt_handler_call",
 	"ib_tasklet_call",
 	"ib_tx_cq_event",
 	"ib_tx_ring_full",
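The RDS hunk only drops a stray "s_" prefix, but it matters because this name table is read index-for-index against the per-CPU counter struct when stats are exported. A generic way to keep such parallel tables honest (my sketch, not RDS code) is a compile-time length check:

	#include <stdint.h>
	#include <stdio.h>

	struct stats {
		uint64_t evt_handler_call;
		uint64_t tasklet_call;
	};

	static const char *const stat_names[] = {
		"ib_evt_handler_call",
		"ib_tasklet_call",
	};

	/* Counter i is printed under stat_names[i]; a length mismatch
	 * fails at compile time instead of skewing every exported stat. */
	_Static_assert(sizeof(stat_names) / sizeof(stat_names[0]) ==
		       sizeof(struct stats) / sizeof(uint64_t),
		       "stat name table out of sync");

	int main(void)
	{
		struct stats s = { 3, 7 };
		const uint64_t *v = (const uint64_t *)&s;

		for (unsigned i = 0; i < 2; i++)
			printf("%s: %llu\n", stat_names[i],
			       (unsigned long long)v[i]);
		return 0;
	}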
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc)
 
 void qdisc_put(struct Qdisc *qdisc)
 {
+	if (!qdisc)
+		return;
+
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
 		return;
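With the NULL check, qdisc_put() tolerates a NULL argument the way kfree() does, so error and teardown paths can call it unconditionally on a pointer that may never have been assigned. The convention in miniature (illustrative types):

	#include <stdlib.h>

	struct qd { int refcnt; };

	/* NULL-safe put: callers may pass a pointer that was never set,
	 * exactly as with kfree(NULL). */
	static void qd_put(struct qd *q)
	{
		if (!q)
			return;
		if (--q->refcnt == 0)
			free(q);
	}

	int main(void)
	{
		struct qd *q = NULL;

		qd_put(q);	/* no-op instead of a crash */
		return 0;
	}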