Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) GRE tunnel drivers don't set the transport header properly; they also blindly dereference the inner IPv4 header and need some checks. Fixes from Isaku Yamahata.

 2) Fix sleeps while atomic in netdevice rename code, from Eric Dumazet.

 3) Fix double-spinlock in solos-pci driver, from Dan Carpenter.

 4) More ARP bug fixes. Fix lockdep splat in arp_solicit() and then the bug accidentally added by that fix. From Eric Dumazet and Cong Wang.

 5) Remove some __dev* annotations that slipped back in, as well as all HOTPLUG references. From Greg KH.

 6) RDS protocol uses wrong interfaces to access scatter-gather elements, causing a regression. From Mike Marciniszyn.

 7) Fix build error in cpts driver, from Richard Cochran.

 8) Fix arithmetic in packet scheduler, from Stefan Hasko.

 9) Similarly, fix operator association in the random backoff calculation in batman-adv. From Akinobu Mita.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (21 commits)
  ipv6/ip6_gre: set transport header correctly
  ipv4/ip_gre: set transport header correctly to gre header
  IB/rds: suppress incompatible protocol when version is known
  IB/rds: Correct ib_api use with gs_dma_address/sg_dma_len
  net/vxlan: Use the underlying device index when joining/leaving multicast groups
  tcp: should drop incoming frames without ACK flag set
  netprio_cgroup: define sk_cgrp_prioidx only if NETPRIO_CGROUP is enabled
  cpts: fix a run time warn_on.
  cpts: fix build error by removing useless code.
  batman-adv: fix random jitter calculation
  arp: fix a regression in arp_solicit()
  net: sched: integer overflow fix
  CONFIG_HOTPLUG removal from networking core
  Drivers: network: more __dev* removal
  bridge: call br_netpoll_disable in br_add_if
  ipv4: arp: fix a lockdep splat in arp_solicit()
  tuntap: dont use a private kmem_cache
  net: devnet_rename_seq should be a seqcount
  ip_gre: fix possible use after free
  ip_gre: make ipgre_tunnel_xmit() not parse network header as IP unconditionally
  ...
commit 7fd83b47ce
@@ -538,7 +538,7 @@ static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr
 	} else {
 		count = -EINVAL;
 	}
-	spin_lock_irq(&card->param_queue_lock);
+	spin_unlock_irq(&card->param_queue_lock);
 	return count;
 }

@@ -141,7 +141,7 @@ static int orion_mdio_reset(struct mii_bus *bus)
 	return 0;
 }
 
-static int __devinit orion_mdio_probe(struct platform_device *pdev)
+static int orion_mdio_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct mii_bus *bus;
@@ -197,7 +197,7 @@ static int __devinit orion_mdio_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __devexit orion_mdio_remove(struct platform_device *pdev)
+static int orion_mdio_remove(struct platform_device *pdev)
 {
 	struct mii_bus *bus = platform_get_drvdata(pdev);
 	mdiobus_unregister(bus);
@@ -214,7 +214,7 @@ MODULE_DEVICE_TABLE(of, orion_mdio_match);
 
 static struct platform_driver orion_mdio_driver = {
 	.probe = orion_mdio_probe,
-	.remove = __devexit_p(orion_mdio_remove),
+	.remove = orion_mdio_remove,
 	.driver = {
 		.name = "orion-mdio",
 		.of_match_table = orion_mdio_match,

@@ -635,7 +635,7 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 
 
 /* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
 {
 	u32 val;
 
@@ -650,7 +650,7 @@ static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
 }
 
 /* Config SGMII port */
-static void __devinit mvneta_port_sgmii_config(struct mvneta_port *pp)
+static void mvneta_port_sgmii_config(struct mvneta_port *pp)
 {
 	u32 val;
 
@@ -2564,7 +2564,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
 };
 
 /* Initialize hw */
-static int __devinit mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct mvneta_port *pp, int phy_addr)
 {
 	int queue;
 
@@ -2613,9 +2613,8 @@ static void mvneta_deinit(struct mvneta_port *pp)
 }
 
 /* platform glue : initialize decoding windows */
-static void __devinit
-mvneta_conf_mbus_windows(struct mvneta_port *pp,
-			 const struct mbus_dram_target_info *dram)
+static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
+				     const struct mbus_dram_target_info *dram)
 {
 	u32 win_enable;
 	u32 win_protect;
@@ -2648,7 +2647,7 @@ mvneta_conf_mbus_windows(struct mvneta_port *pp,
 }
 
 /* Power up the port */
-static void __devinit mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 {
 	u32 val;
 
@@ -2671,7 +2670,7 @@ static void __devinit mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 }
 
 /* Device initialization routine */
-static int __devinit mvneta_probe(struct platform_device *pdev)
+static int mvneta_probe(struct platform_device *pdev)
 {
 	const struct mbus_dram_target_info *dram_target_info;
 	struct device_node *dn = pdev->dev.of_node;
@@ -2803,7 +2802,7 @@ err_free_netdev:
 }
 
 /* Device removal routine */
-static int __devexit mvneta_remove(struct platform_device *pdev)
+static int mvneta_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct mvneta_port *pp = netdev_priv(dev);
@@ -2828,7 +2827,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match);
 
 static struct platform_driver mvneta_driver = {
 	.probe = mvneta_probe,
-	.remove = __devexit_p(mvneta_remove),
+	.remove = mvneta_remove,
 	.driver = {
 		.name = MVNETA_DRIVER_NAME,
 		.of_match_table = mvneta_match,

@@ -247,8 +247,7 @@ static void cpts_clk_init(struct cpts *cpts)
 		cpts->refclk = NULL;
 		return;
 	}
-	clk_enable(cpts->refclk);
-	cpts->freq = cpts->refclk->recalc(cpts->refclk);
+	clk_prepare_enable(cpts->refclk);
 }
 
 static void cpts_clk_release(struct cpts *cpts)

@@ -120,7 +120,6 @@ struct cpts {
 	struct delayed_work overflow_work;
 	int phc_index;
 	struct clk *refclk;
-	unsigned long freq;
 	struct list_head events;
 	struct list_head pool;
 	struct cpts_event pool_data[CPTS_MAX_EVENTS];

@@ -180,7 +180,6 @@ struct tun_struct {
 	int debug;
 #endif
 	spinlock_t lock;
-	struct kmem_cache *flow_cache;
 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
 	struct timer_list flow_gc_timer;
 	unsigned long ageing_time;
@@ -209,8 +208,8 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 					      struct hlist_head *head,
 					      u32 rxhash, u16 queue_index)
 {
-	struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
-						    GFP_ATOMIC);
+	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
 
 	if (e) {
 		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
@@ -223,19 +222,12 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 	return e;
 }
 
-static void tun_flow_free(struct rcu_head *head)
-{
-	struct tun_flow_entry *e
-		= container_of(head, struct tun_flow_entry, rcu);
-	kmem_cache_free(e->tun->flow_cache, e);
-}
-
 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
 	hlist_del_rcu(&e->hash_link);
-	call_rcu(&e->rcu, tun_flow_free);
+	kfree_rcu(e, rcu);
 }
 
 static void tun_flow_flush(struct tun_struct *tun)
@@ -833,12 +825,6 @@ static int tun_flow_init(struct tun_struct *tun)
 {
 	int i;
 
-	tun->flow_cache = kmem_cache_create("tun_flow_cache",
-					    sizeof(struct tun_flow_entry), 0, 0,
-					    NULL);
-	if (!tun->flow_cache)
-		return -ENOMEM;
-
 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
 		INIT_HLIST_HEAD(&tun->flows[i]);
 
@@ -854,10 +840,6 @@ static void tun_flow_uninit(struct tun_struct *tun)
 {
 	del_timer_sync(&tun->flow_gc_timer);
 	tun_flow_flush(tun);
-
-	/* Wait for completion of call_rcu()'s */
-	rcu_barrier();
-	kmem_cache_destroy(tun->flow_cache);
 }
 
 /* Initialize net device. */

@@ -505,7 +505,8 @@ static int vxlan_join_group(struct net_device *dev)
 	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 	struct sock *sk = vn->sock->sk;
 	struct ip_mreqn mreq = {
-		.imr_multiaddr.s_addr = vxlan->gaddr,
+		.imr_multiaddr.s_addr	= vxlan->gaddr,
+		.imr_ifindex		= vxlan->link,
 	};
 	int err;
 
@@ -532,7 +533,8 @@ static int vxlan_leave_group(struct net_device *dev)
 	int err = 0;
 	struct sock *sk = vn->sock->sk;
 	struct ip_mreqn mreq = {
-		.imr_multiaddr.s_addr = vxlan->gaddr,
+		.imr_multiaddr.s_addr	= vxlan->gaddr,
+		.imr_ifindex		= vxlan->link,
 	};
 
 	/* Only leave group when last vxlan is done. */

@@ -341,7 +341,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
 	.maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
 };
 
-static struct pci_device_id rtl8723ae_pci_ids[] __devinitdata = {
+static struct pci_device_id rtl8723ae_pci_ids[] = {
 	{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723ae_hal_cfg)},
 	{},
 };

@@ -1576,7 +1576,7 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 extern rwlock_t		dev_base_lock;		/* Device list lock */
 
-extern seqlock_t	devnet_rename_seq;	/* Device rename lock */
+extern seqcount_t	devnet_rename_seq;	/* Device rename seq */
 
 
 #define for_each_netdev(net, d)		\

@@ -367,7 +367,7 @@ struct sock {
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
-#ifdef CONFIG_CGROUPS
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 	__u32			sk_cgrp_prioidx;
 #endif
 	struct pid		*sk_peer_pid;

@@ -123,7 +123,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
 	unsigned int msecs;
 
 	msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
-	msecs += (random32() % 2 * BATADV_JITTER);
+	msecs += random32() % (2 * BATADV_JITTER);
 
 	return jiffies + msecs_to_jiffies(msecs);
 }

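Editor's note on the batman-adv hunk above: '%' and '*' have the same precedence and associate left to right, so the old expression parsed as (random32() % 2) * BATADV_JITTER and could only ever add 0 or BATADV_JITTER, rather than a jitter spread over 0..2*BATADV_JITTER-1. A minimal standalone illustration (not part of the commit; the JITTER value below is made up):

#include <stdio.h>
#include <stdlib.h>

#define JITTER 20	/* stand-in for BATADV_JITTER, value assumed */

int main(void)
{
	unsigned int r = (unsigned int)rand();

	/* Old parse: (r % 2) * JITTER -- only ever 0 or JITTER. */
	unsigned int buggy = r % 2 * JITTER;
	/* Fixed parse: r % (2 * JITTER) -- any value in 0..2*JITTER-1. */
	unsigned int fixed = r % (2 * JITTER);

	printf("buggy=%u fixed=%u\n", buggy, fixed);
	return 0;
}
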
@@ -366,11 +366,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
 	err = netdev_set_master(dev, br->dev);
 	if (err)
-		goto err3;
+		goto err4;
 
 	err = netdev_rx_handler_register(dev, br_handle_frame, p);
 	if (err)
-		goto err4;
+		goto err5;
 
 	dev->priv_flags |= IFF_BRIDGE_PORT;
 
@@ -402,8 +402,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
 	return 0;
 
-err4:
+err5:
 	netdev_set_master(dev, NULL);
+err4:
+	br_netpoll_disable(p);
 err3:
 	sysfs_remove_link(br->ifobj, p->dev->name);
 err2:

@@ -203,7 +203,7 @@ static struct list_head offload_base __read_mostly;
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
-DEFINE_SEQLOCK(devnet_rename_seq);
+seqcount_t devnet_rename_seq;
 
 static inline void dev_base_seq_inc(struct net *net)
 {
@@ -1093,10 +1093,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
 	if (dev->flags & IFF_UP)
 		return -EBUSY;
 
-	write_seqlock(&devnet_rename_seq);
+	write_seqcount_begin(&devnet_rename_seq);
 
 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
-		write_sequnlock(&devnet_rename_seq);
+		write_seqcount_end(&devnet_rename_seq);
 		return 0;
 	}
 
@@ -1104,7 +1104,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
 	err = dev_get_valid_name(net, dev, newname);
 	if (err < 0) {
-		write_sequnlock(&devnet_rename_seq);
+		write_seqcount_end(&devnet_rename_seq);
 		return err;
 	}
 
@@ -1112,11 +1112,11 @@ rollback:
 	ret = device_rename(&dev->dev, dev->name);
 	if (ret) {
 		memcpy(dev->name, oldname, IFNAMSIZ);
-		write_sequnlock(&devnet_rename_seq);
+		write_seqcount_end(&devnet_rename_seq);
 		return ret;
 	}
 
-	write_sequnlock(&devnet_rename_seq);
+	write_seqcount_end(&devnet_rename_seq);
 
 	write_lock_bh(&dev_base_lock);
 	hlist_del_rcu(&dev->name_hlist);
@@ -1135,7 +1135,7 @@ rollback:
 	/* err >= 0 after dev_alloc_name() or stores the first errno */
 	if (err >= 0) {
 		err = ret;
-		write_seqlock(&devnet_rename_seq);
+		write_seqcount_begin(&devnet_rename_seq);
 		memcpy(dev->name, oldname, IFNAMSIZ);
 		goto rollback;
 	} else {
@@ -4180,7 +4180,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
 		return -EFAULT;
 
 retry:
-	seq = read_seqbegin(&devnet_rename_seq);
+	seq = read_seqcount_begin(&devnet_rename_seq);
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
 	if (!dev) {
@@ -4190,7 +4190,7 @@ retry:
 
 	strcpy(ifr.ifr_name, dev->name);
 	rcu_read_unlock();
-	if (read_seqretry(&devnet_rename_seq, seq))
+	if (read_seqcount_retry(&devnet_rename_seq, seq))
 		goto retry;
 
 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))

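Editor's note on the devnet_rename_seq hunks above: write_seqlock() takes the seqlock's internal spinlock, and dev_change_name() then calls device_rename(), which can sleep, inside that critical section. Because rename writers are already serialized (by the RTNL in this code), a bare seqcount_t is enough: the write side may block while lockless readers such as dev_ifname() and sock_getbindtodevice() simply retry. A schematic sketch of the pattern (not code from this commit; the mutex and "example_" names are made up and stand in for the RTNL and the real identifiers):

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/seqlock.h>
#include <linux/string.h>

static seqcount_t example_rename_seq;		/* static storage starts at zero */
static DEFINE_MUTEX(example_rename_mutex);	/* external writer-vs-writer lock */
static char example_name[IFNAMSIZ];

static void example_set_name(const char *newname)
{
	mutex_lock(&example_rename_mutex);	/* writers exclude each other here */
	write_seqcount_begin(&example_rename_seq);
	/* in dev_change_name() this section also does work that may sleep */
	strlcpy(example_name, newname, IFNAMSIZ);
	write_seqcount_end(&example_rename_seq);
	mutex_unlock(&example_rename_mutex);
}

static void example_get_name(char *buf)
{
	unsigned int seq;

	do {	/* lockless reader: retry if a rename raced with the copy */
		seq = read_seqcount_begin(&example_rename_seq);
		strlcpy(buf, example_name, IFNAMSIZ);
	} while (read_seqcount_retry(&example_rename_seq, seq));
}

In the dev.c hunks the role of example_rename_mutex is played by rtnl_lock(), which the rename path already holds, so no extra lock is introduced there.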
@@ -1334,7 +1334,6 @@ struct kobj_ns_type_operations net_ns_type_operations = {
 };
 EXPORT_SYMBOL_GPL(net_ns_type_operations);
 
-#ifdef CONFIG_HOTPLUG
 static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 {
 	struct net_device *dev = to_net_dev(d);
@@ -1353,7 +1352,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 exit:
 	return retval;
 }
-#endif
 
 /*
  *	netdev_release -- destroy and free a dead device.
@@ -1382,9 +1380,7 @@ static struct class net_class = {
 #ifdef CONFIG_SYSFS
 	.dev_attrs = net_class_attributes,
 #endif /* CONFIG_SYSFS */
-#ifdef CONFIG_HOTPLUG
 	.dev_uevent = netdev_uevent,
-#endif
 	.ns_type = &net_ns_type_operations,
 	.namespace = net_namespace,
 };

@@ -583,7 +583,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
 		goto out;
 
 retry:
-	seq = read_seqbegin(&devnet_rename_seq);
+	seq = read_seqcount_begin(&devnet_rename_seq);
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
 	ret = -ENODEV;
@@ -594,7 +594,7 @@ retry:
 
 	strcpy(devname, dev->name);
 	rcu_read_unlock();
-	if (read_seqretry(&devnet_rename_seq, seq))
+	if (read_seqcount_retry(&devnet_rename_seq, seq))
 		goto retry;
 
 	len = strlen(devname) + 1;

@@ -321,7 +321,7 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
 	__be32 saddr = 0;
-	u8 *dst_ha = NULL;
+	u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
 	struct net_device *dev = neigh->dev;
 	__be32 target = *(__be32 *)neigh->primary_key;
 	int probes = atomic_read(&neigh->probes);
@@ -363,8 +363,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 	if (probes < 0) {
 		if (!(neigh->nud_state & NUD_VALID))
 			pr_debug("trying to ucast probe in NUD_INVALID\n");
-		dst_ha = neigh->ha;
-		read_lock_bh(&neigh->lock);
+		neigh_ha_snapshot(dst_ha, neigh, dev);
+		dst_hw = dst_ha;
 	} else {
 		probes -= neigh->parms->app_probes;
 		if (probes < 0) {
@@ -376,9 +376,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 	}
 
 	arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-		 dst_ha, dev->dev_addr, NULL);
-	if (dst_ha)
-		read_unlock_bh(&neigh->lock);
+		 dst_hw, dev->dev_addr, NULL);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)

@@ -750,6 +750,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	int gre_hlen;
 	__be32 dst;
 	int mtu;
+	u8 ttl;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    skb_checksum_help(skb))
@@ -760,7 +761,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		tiph = (const struct iphdr *)skb->data;
+		if (skb->protocol == htons(ETH_P_IP))
+			tiph = (const struct iphdr *)skb->data;
+		else
+			tiph = &tunnel->parms.iph;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
@@ -812,6 +816,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			goto tx_error;
 	}
 
+	ttl = tiph->ttl;
 	tos = tiph->tos;
 	if (tos == 1) {
 		tos = 0;
@@ -904,11 +909,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		dev_kfree_skb(skb);
 		skb = new_skb;
 		old_iph = ip_hdr(skb);
+		/* Warning : tiph value might point to freed memory */
 	}
 
-	skb_reset_transport_header(skb);
 	skb_push(skb, gre_hlen);
 	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, sizeof(*iph));
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
@@ -927,8 +933,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
 	iph->daddr = fl4.daddr;
 	iph->saddr = fl4.saddr;
+	iph->ttl = ttl;
 
-	if ((iph->ttl = tiph->ttl) == 0) {
+	if (ttl == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
 			iph->ttl = old_iph->ttl;
 #if IS_ENABLED(CONFIG_IPV6)

@@ -5543,6 +5543,9 @@ slow_path:
 	if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
 		goto csum_error;
 
+	if (!th->ack)
+		goto discard;
+
 	/*
 	 *	Standard slow path.
 	 */
@@ -5551,7 +5554,7 @@ slow_path:
 	return 0;
 
 step5:
-	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+	if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
 		goto discard;
 
 	/* ts_recent update must be made after we are sure that the packet
@@ -5984,11 +5987,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
 			goto discard;
 	}
 
+	if (!th->ack)
+		goto discard;
+
 	if (!tcp_validate_incoming(sk, skb, th, 0))
 		return 0;
 
 	/* step 5: check the ACK field */
-	if (th->ack) {
+	if (true) {
 		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
 		switch (sk->sk_state) {
@@ -6138,8 +6145,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			}
 			break;
 		}
-	} else
-		goto discard;
+	}
 
 	/* ts_recent update must be made after we are sure that the packet
 	 * is in window.

@@ -758,8 +758,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		skb_dst_set_noref(skb, dst);
 	}
 
-	skb->transport_header = skb->network_header;
-
 	proto = NEXTHDR_GRE;
 	if (encap_limit >= 0) {
 		init_tel_txopt(&opt, encap_limit);
@@ -768,6 +766,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
 	skb_push(skb, gre_hlen);
 	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, sizeof(*ipv6h));
 
 	/*
 	 * Push down and install the IP header.

@@ -434,12 +434,11 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
 		version = RDS_PROTOCOL_3_0;
 		while ((common >>= 1) != 0)
 			version++;
-	}
-	printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
-			"incompatible protocol version %u.%u\n",
-			&dp->dp_saddr,
-			dp->dp_protocol_major,
-			dp->dp_protocol_minor);
+	} else
+		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
+				&dp->dp_saddr,
+				dp->dp_protocol_major,
+				dp->dp_protocol_minor);
 	return version;
 }
 
@@ -339,8 +339,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	sge->length = sizeof(struct rds_header);
 
 	sge = &recv->r_sge[1];
-	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
-	sge->length = sg_dma_len(&recv->r_frag->f_sg);
+	sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
+	sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
 
 	ret = 0;
 out:
@@ -381,7 +381,10 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
 		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
 		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
-			 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
+			 (long) ib_sg_dma_address(
+				ic->i_cm_id->device,
+				&recv->r_frag->f_sg),
+			 ret);
 		if (ret) {
 			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "

@@ -919,7 +919,7 @@ ok:
 	q->now = ktime_to_ns(ktime_get());
 	start_at = jiffies;
 
-	next_event = q->now + 5 * NSEC_PER_SEC;
+	next_event = q->now + 5LLU * NSEC_PER_SEC;
 
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */

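Editor's note on the htb hunk above: NSEC_PER_SEC is a long constant, so on a 32-bit build 5 * NSEC_PER_SEC is evaluated in 32-bit arithmetic and overflows before it is widened for the addition to the 64-bit q->now; the 5LLU literal forces the whole expression into 64 bits. A small standalone illustration (not part of the commit):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L	/* as in the kernel's time headers */

int main(void)
{
	unsigned long long now = 1000;

	/* With a 32-bit long, 5 * NSEC_PER_SEC overflows before the addition. */
	unsigned long long bad = now + 5 * NSEC_PER_SEC;
	/* 5LLU makes the multiplication (and the add) 64-bit everywhere. */
	unsigned long long good = now + 5LLU * NSEC_PER_SEC;

	printf("bad=%llu good=%llu\n", bad, good);
	return 0;
}
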
@@ -2365,7 +2365,6 @@ int set_regdom(const struct ieee80211_regdomain *rd)
 	return r;
 }
 
-#ifdef CONFIG_HOTPLUG
 int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
 	if (last_request && !last_request->processed) {
@@ -2377,12 +2376,6 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 	return 0;
 }
-#else
-int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-	return -ENODEV;
-}
-#endif /* CONFIG_HOTPLUG */
 
 void wiphy_regulatory_register(struct wiphy *wiphy)
 {

@@ -77,13 +77,11 @@ static void wiphy_dev_release(struct device *dev)
 	cfg80211_dev_free(rdev);
 }
 
-#ifdef CONFIG_HOTPLUG
 static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
 	/* TODO, we probably need stuff here */
 	return 0;
 }
-#endif
 
 static int wiphy_suspend(struct device *dev, pm_message_t state)
 {
@@ -134,9 +132,7 @@ struct class ieee80211_class = {
 	.owner = THIS_MODULE,
 	.dev_release = wiphy_dev_release,
 	.dev_attrs = ieee80211_dev_attrs,
-#ifdef CONFIG_HOTPLUG
 	.dev_uevent = wiphy_uevent,
-#endif
 	.suspend = wiphy_suspend,
 	.resume = wiphy_resume,
 	.ns_type = &net_ns_type_operations,