2019-05-27 14:55:01 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* INET An implementation of the TCP/IP protocol suite for the LINUX
|
|
|
|
* operating system. INET is implemented using the BSD Socket
|
|
|
|
* interface as the means of communication with the user level.
|
|
|
|
*
|
|
|
|
* Pseudo-driver for the loopback interface.
|
|
|
|
*
|
|
|
|
* Version: @(#)loopback.c 1.0.4b 08/16/93
|
|
|
|
*
|
2005-05-06 07:16:16 +08:00
|
|
|
* Authors: Ross Biro
|
2005-04-17 06:20:36 +08:00
|
|
|
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
|
|
|
|
* Donald Becker, <becker@scyld.com>
|
|
|
|
*
|
|
|
|
* Alan Cox : Fixed oddments for NET3.014
|
|
|
|
* Alan Cox : Rejig for NET3.029 snap #3
|
2017-03-12 04:06:01 +08:00
|
|
|
* Alan Cox : Fixed NET3.029 bugs and sped up
|
2005-04-17 06:20:36 +08:00
|
|
|
* Larry McVoy : Tiny tweak to double performance
|
|
|
|
* Alan Cox : Backed out LMV's tweak - the linux mm
|
|
|
|
* can't take it...
|
|
|
|
* Michael Griffith: Don't bother computing the checksums
|
|
|
|
* on packets received on the loopback
|
|
|
|
* interface.
|
|
|
|
* Alexey Kuznetsov: Potential hang under some extreme
|
|
|
|
* cases removed.
|
|
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/jiffies.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/socket.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/fcntl.h>
|
|
|
|
#include <linux/in.h>
|
|
|
|
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2017-03-12 04:06:01 +08:00
|
|
|
#include <linux/io.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <linux/inet.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/ethtool.h>
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/checksum.h>
|
|
|
|
#include <linux/if_ether.h> /* For the statistics structure. */
|
|
|
|
#include <linux/if_arp.h> /* For ARPHRD_ETHER */
|
|
|
|
#include <linux/ip.h>
|
|
|
|
#include <linux/tcp.h>
|
|
|
|
#include <linux/percpu.h>
|
2017-03-12 04:06:54 +08:00
|
|
|
#include <linux/net_tstamp.h>
|
2007-09-27 13:10:56 +08:00
|
|
|
#include <net/net_namespace.h>
|
2010-06-22 20:44:11 +08:00
|
|
|
#include <linux/u64_stats_sync.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-07-02 05:38:49 +08:00
|
|
|
/* blackhole_netdev - a device used for dsts that are marked expired!
 * This is global device (instead of per-net-ns) since it's not needed
 * to be per-ns and gets initialized at boot time.
 */
struct net_device *blackhole_netdev;
EXPORT_SYMBOL(blackhole_netdev);
|
|
|
|
|
2017-03-12 04:06:01 +08:00
|
|
|
/* The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct pcpu_lstats *lb_stats;
	int len;

	/* Record a software TX timestamp before the skb is handed back
	 * to the RX path.
	 */
	skb_tx_timestamp(skb);

	/* do not fool net_timestamp_check() with various clock bases */
	skb->tstamp = 0;

	/* Detach the skb from its originating socket before looping it
	 * back in, so socket memory accounting is released on the TX side.
	 */
	skb_orphan(skb);

	/* Before queueing this packet to netif_rx(),
	 * make sure dst is refcounted.
	 */
	skb_dst_force(skb);

	skb->protocol = eth_type_trans(skb, dev);

	/* it's OK to use per_cpu_ptr() because BHs are off */
	lb_stats = this_cpu_ptr(dev->lstats);

	/* Cache the length: netif_rx() consumes the skb, so it must not
	 * be dereferenced afterwards.
	 */
	len = skb->len;
	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
		u64_stats_update_begin(&lb_stats->syncp);
		lb_stats->bytes += len;
		lb_stats->packets++;
		u64_stats_update_end(&lb_stats->syncp);
	}

	return NETDEV_TX_OK;
}
|
|
|
|
|
2017-01-07 11:12:52 +08:00
|
|
|
/* Aggregate the per-CPU packet/byte counters into a single
 * rtnl_link_stats64. Every packet transmitted on loopback is also
 * received on it, so the RX and TX totals are identical.
 */
static void loopback_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	u64 bytes = 0;
	u64 packets = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats;
		u64 tbytes, tpackets;
		unsigned int start;

		lb_stats = per_cpu_ptr(dev->lstats, i);
		/* Retry loop: re-read this CPU's counters if a writer
		 * updated them concurrently (u64_stats seqcount).
		 */
		do {
			start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
			tbytes = lb_stats->bytes;
			tpackets = lb_stats->packets;
		} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
		bytes += tbytes;
		packets += tpackets;
	}
	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes = bytes;
	stats->tx_bytes = bytes;
}
|
|
|
|
|
2006-09-28 11:33:34 +08:00
|
|
|
/* ethtool get_link callback: the loopback link is always up. */
static u32 always_on(struct net_device *dev)
{
	return 1;
}
|
|
|
|
|
2006-09-14 02:30:00 +08:00
|
|
|
static const struct ethtool_ops loopback_ethtool_ops = {
	.get_link		= always_on,
	/* Generic software timestamping capabilities. */
	.get_ts_info		= ethtool_op_get_ts_info,
};
|
|
|
|
|
2007-09-27 13:08:12 +08:00
|
|
|
static int loopback_dev_init(struct net_device *dev)
|
|
|
|
{
|
2014-02-14 03:46:28 +08:00
|
|
|
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
|
2010-09-24 07:51:51 +08:00
|
|
|
if (!dev->lstats)
|
2007-09-27 13:08:12 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* priv_destructor: runs once all references to the device are gone.
 * Clears the namespace's loopback pointer and releases the per-CPU
 * stats allocated in loopback_dev_init().
 */
static void loopback_dev_free(struct net_device *dev)
{
	dev_net(dev)->loopback_dev = NULL;
	free_percpu(dev->lstats);
}
|
|
|
|
|
2008-11-20 13:46:18 +08:00
|
|
|
static const struct net_device_ops loopback_ops = {
	.ndo_init		= loopback_dev_init,
	.ndo_start_xmit		= loopback_xmit,
	.ndo_get_stats64	= loopback_get_stats64,
	.ndo_set_mac_address	= eth_mac_addr,
};
|
|
|
|
|
2019-07-02 05:38:49 +08:00
|
|
|
/* Common device setup shared by the loopback and blackhole devices.
 * The caller supplies the MTU, the ops tables (any of which may be
 * NULL for the blackhole device), and the destructor run when the
 * last reference is dropped.
 */
static void gen_lo_setup(struct net_device *dev,
			 unsigned int mtu,
			 const struct ethtool_ops *eth_ops,
			 const struct header_ops *hdr_ops,
			 const struct net_device_ops *dev_ops,
			 void (*dev_destructor)(struct net_device *dev))
{
	dev->mtu = mtu;
	dev->hard_header_len = ETH_HLEN;	/* 14 */
	dev->min_header_len = ETH_HLEN;	/* 14 */
	dev->addr_len = ETH_ALEN;	/* 6 */
	dev->type = ARPHRD_LOOPBACK;	/* 0x0001*/
	dev->flags = IFF_LOOPBACK;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	/* Keep the dst attached to looped-back skbs. */
	netif_keep_dst(dev);
	dev->hw_features = NETIF_F_GSO_SOFTWARE;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
		| NETIF_F_GSO_SOFTWARE
		| NETIF_F_HW_CSUM
		| NETIF_F_RXCSUM
		| NETIF_F_SCTP_CRC
		| NETIF_F_HIGHDMA
		| NETIF_F_LLTX
		| NETIF_F_NETNS_LOCAL
		| NETIF_F_VLAN_CHALLENGED
		| NETIF_F_LOOPBACK;
	dev->ethtool_ops = eth_ops;
	dev->header_ops = hdr_ops;
	dev->netdev_ops = dev_ops;
	/* unregister_netdevice() performs the final free_netdev();
	 * dev_destructor only frees private state.
	 */
	dev->needs_free_netdev = true;
	dev->priv_destructor = dev_destructor;
}
|
|
|
|
|
|
|
|
/* The loopback device is special. There is only one instance
 * per network namespace.
 */
static void loopback_setup(struct net_device *dev)
{
	/* MTU is 64KB; Ethernet header/ops so looped-back frames parse
	 * like ordinary Ethernet traffic.
	 */
	gen_lo_setup(dev, (64 * 1024), &loopback_ethtool_ops, &eth_header_ops,
		     &loopback_ops, loopback_dev_free);
}
|
2007-09-26 10:16:28 +08:00
|
|
|
|
2005-08-19 05:05:18 +08:00
|
|
|
/* Setup and register the loopback device. */
static __net_init int loopback_net_init(struct net *net)
{
	struct net_device *dev;
	int err;

	err = -ENOMEM;
	dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup);
	if (!dev)
		goto out;

	dev_net_set(dev, net);
	err = register_netdev(dev);
	if (err)
		goto out_free_netdev;

	/* "lo" is the first device registered in every namespace, so it
	 * must always get the well-known loopback ifindex.
	 */
	BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
	net->loopback_dev = dev;
	return 0;

out_free_netdev:
	free_netdev(dev);
out:
	/* The init namespace cannot function without loopback. */
	if (net_eq(net, &init_net))
		panic("loopback: Failed to register netdevice: %d\n", err);
	return err;
}
|
|
|
|
|
2008-11-08 14:54:20 +08:00
|
|
|
/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
	.init = loopback_net_init,
	/* No .exit: the device is torn down via priv_destructor when the
	 * namespace's devices are unregistered.
	 */
};
|
2019-07-02 05:38:49 +08:00
|
|
|
|
|
|
|
/* blackhole netdevice */
|
|
|
|
static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
|
|
|
kfree_skb(skb);
|
|
|
|
net_warn_ratelimited("%s(): Dropping skb.\n", __func__);
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Minimal ops table: the blackhole device only ever drops packets. */
static const struct net_device_ops blackhole_netdev_ops = {
	.ndo_start_xmit = blackhole_netdev_xmit,
};
|
|
|
|
|
|
|
|
/* This is a dst-dummy device used specifically for invalidated
 * DSTs and unlike loopback, this is not per-ns.
 */
static void blackhole_netdev_setup(struct net_device *dev)
{
	/* Minimum MTU; no ethtool/header ops and no destructor needed
	 * since this device is never unregistered.
	 */
	gen_lo_setup(dev, ETH_MIN_MTU, NULL, NULL, &blackhole_netdev_ops, NULL);
}
|
|
|
|
|
|
|
|
/* Setup and register the blackhole_netdev. */
static int __init blackhole_netdev_init(void)
{
	blackhole_netdev = alloc_netdev(0, "blackhole_dev", NET_NAME_UNKNOWN,
					blackhole_netdev_setup);
	if (!blackhole_netdev)
		return -ENOMEM;

	/* Activate the qdisc layer under RTNL so the device can accept
	 * skbs; the device itself is never register_netdev()'d.
	 */
	rtnl_lock();
	dev_init_scheduler(blackhole_netdev);
	dev_activate(blackhole_netdev);
	rtnl_unlock();

	blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
	dev_net_set(blackhole_netdev, &init_net);

	return 0;
}

device_initcall(blackhole_netdev_init);
|