// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

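/* A vxcan pair is managed like a veth pair via rtnetlink. A typical setup
 * with a recent iproute2 looks like this (interface and namespace names are
 * only examples):
 *
 *	ip netns add ns1
 *	ip link add vxcan0 type vxcan peer name vxcan1 netns ns1
 *	ip link set vxcan0 up
 *	ip -n ns1 link set vxcan1 up
 *
 * CAN frames sent on vxcan0 are then received on vxcan1 inside ns1 and
 * vice versa.
 */
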
struct vxcan_priv {
	struct net_device __rcu *peer;
};

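/* Transmit path: a frame sent on one endpoint is re-labelled and injected
 * into the receive path of the peer endpoint, which may live in a different
 * network namespace. There is no real CAN bus behind either device.
 */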
static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	u8 len;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb = can_create_echo_skb(skb);
	if (!skb)
		goto out_unlock;

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	len = cfd->len;
	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

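/* The carrier of a tunnel endpoint is only signalled as up while both
 * devices of the pair are administratively up.
 */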
static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu	= vxcan_change_mtu,
};

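/* The netdev private area holds the vxcan_priv structure followed, at the
 * next NETDEV_ALIGN boundary, by the CAN midlayer private data (see
 * .priv_size in vxcan_link_ops below).
 */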
static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= (IFF_NOARP|IFF_ECHO);
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->needs_free_netdev	= true;

	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;

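/* Create a vxcan pair: the peer device is set up first (optionally with a
 * user supplied name and in a different network namespace), then the local
 * device is registered and the two priv->peer pointers are cross linked.
 */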
static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) +
					  sizeof(struct ifinfomsg),
					  nla_len(nla_peer) -
					  sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}

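/* Removing one endpoint tears down the whole pair: the peer device, if it
 * is still linked, is queued for unregistration as well.
 */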
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);
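
/*
 * Userspace view (sketch using the standard SocketCAN raw socket API; the
 * interface name "vxcan0" is only an example): a classic CAN frame written
 * to one endpoint is delivered on the peer endpoint.
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *	struct ifreq ifr;
 *	struct can_frame cf = { .can_id = 0x123, .can_dlc = 2,
 *				.data = { 0xde, 0xad } };
 *
 *	strcpy(ifr.ifr_name, "vxcan0");
 *	ioctl(s, SIOCGIFINDEX, &ifr);
 *	addr.can_ifindex = ifr.ifr_ifindex;
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *	write(s, &cf, sizeof(cf));
 */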