// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame handler and other utility functions for HSR and PRP.
 */
|
|
|
|
|
|
|
|
#include "hsr_slave.h"
|
|
|
|
#include <linux/etherdevice.h>
|
2014-07-05 05:37:27 +08:00
|
|
|
#include <linux/if_arp.h>
|
2017-02-05 01:00:49 +08:00
|
|
|
#include <linux/if_vlan.h>
|
2014-07-05 05:35:24 +08:00
|
|
|
#include "hsr_main.h"
|
2014-07-05 05:37:27 +08:00
|
|
|
#include "hsr_device.h"
|
2014-07-05 05:41:03 +08:00
|
|
|
#include "hsr_forward.h"
|
2014-07-05 05:35:24 +08:00
|
|
|
#include "hsr_framereg.h"
|
|
|
|
|
2020-07-22 22:40:21 +08:00
|
|
|
/* Return true if @protocol is not one a doubly attached node (DAN)
 * is expected to receive, i.e. neither HSR- nor PRP-tagged.
 */
bool hsr_invalid_dan_ingress_frame(__be16 protocol)
{
	if (protocol == htons(ETH_P_PRP) || protocol == htons(ETH_P_HSR))
		return false;

	return true;
}
|
|
|
|
|
2014-07-05 05:41:03 +08:00
|
|
|
/* rx_handler installed on every HSR/PRP slave device.  Decides whether a
 * received frame belongs to the HSR/PRP bridge (forward and consume it)
 * or should be passed up the regular network stack.
 */
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	__be16 protocol;

	/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	/* No port means the slave is being torn down; leave the frame to
	 * the normal receive path.
	 */
	port = hsr_port_get_rcu(skb->dev);
	if (!port)
		goto finish_pass;
	hsr = port->hsr;

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* For HSR, only tagged frames are expected (unless the device offloads
	 * HSR tag removal), but for PRP there could be non tagged frames as
	 * well from Single attached nodes (SANs).
	 */
	protocol = eth_hdr(skb)->h_proto;

	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    hsr->proto_ops->invalid_dan_ingress_frame &&
	    hsr->proto_ops->invalid_dan_ingress_frame(protocol))
		goto finish_pass;

	/* The frame is ours: restore the ethernet header the stack already
	 * pulled, then point the network header past the HSR tag when one
	 * is present (HSR always; PRP only for hsr->prot_version == 0,
	 * where ETH_P_PRP marks the supervision/tagged frames).
	 */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
	    protocol == htons(ETH_P_HSR))
		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
	skb_reset_mac_len(skb);

	/* hsr_forward_skb() takes ownership of skb */
	hsr_forward_skb(skb, port);

finish_consume:
	return RX_HANDLER_CONSUMED;

finish_pass:
	return RX_HANDLER_PASS;
}
|
|
|
|
|
|
|
|
bool hsr_port_exists(const struct net_device *dev)
|
|
|
|
{
|
|
|
|
return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
|
|
|
|
}
|
|
|
|
|
2020-02-29 02:01:35 +08:00
|
|
|
static int hsr_check_dev_ok(struct net_device *dev,
|
|
|
|
struct netlink_ext_ack *extack)
|
2014-07-05 05:37:27 +08:00
|
|
|
{
|
|
|
|
/* Don't allow HSR on non-ethernet like devices */
|
2019-04-06 01:31:25 +08:00
|
|
|
if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
|
|
|
|
dev->addr_len != ETH_ALEN) {
|
2020-02-29 02:01:35 +08:00
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
|
2014-07-05 05:37:27 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Don't allow enslaving hsr devices */
|
|
|
|
if (is_hsr_master(dev)) {
|
2020-02-29 02:01:35 +08:00
|
|
|
NL_SET_ERR_MSG_MOD(extack,
|
|
|
|
"Cannot create trees of HSR devices.");
|
2014-07-05 05:37:27 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2014-07-05 05:38:05 +08:00
|
|
|
if (hsr_port_exists(dev)) {
|
2020-02-29 02:01:35 +08:00
|
|
|
NL_SET_ERR_MSG_MOD(extack,
|
|
|
|
"This device is already a HSR slave.");
|
2014-07-05 05:37:27 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2017-02-05 01:00:49 +08:00
|
|
|
if (is_vlan_dev(dev)) {
|
2020-02-29 02:01:35 +08:00
|
|
|
NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
|
2014-07-05 05:37:27 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2014-07-05 05:41:03 +08:00
|
|
|
if (dev->priv_flags & IFF_DONT_BRIDGE) {
|
2020-02-29 02:01:35 +08:00
|
|
|
NL_SET_ERR_MSG_MOD(extack,
|
|
|
|
"This device does not support bridging.");
|
2014-07-05 05:41:03 +08:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2014-07-05 05:37:27 +08:00
|
|
|
/* HSR over bonded devices has not been tested, but I'm not sure it
|
|
|
|
* won't work...
|
|
|
|
*/
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-07-05 05:38:05 +08:00
|
|
|
/* Setup device to be added to the HSR bridge.
 * Acquires, in order: promiscuous mode (unless forwarding is offloaded),
 * the upper-device link to the HSR master, and the rx_handler.  On
 * failure each already-acquired resource is released in reverse order.
 */
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
			     struct hsr_port *port,
			     struct netlink_ext_ack *extack)
{
	struct net_device *hsr_dev;
	struct hsr_port *master;
	int res;

	/* Don't use promiscuous mode for offload since L2 frame forward
	 * happens at the offloaded hardware.
	 */
	if (!port->hsr->fwd_offloaded) {
		res = dev_set_promiscuity(dev, 1);
		if (res)
			return res;
	}

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr_dev = master->dev;

	/* Make the HSR master an upper device of the slave */
	res = netdev_upper_dev_link(dev, hsr_dev, extack);
	if (res)
		goto fail_upper_dev_link;

	/* From here on, received frames are steered into hsr_handle_frame() */
	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	dev_disable_lro(dev);

	return 0;

fail_rx_handler:
	netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
	if (!port->hsr->fwd_offloaded)
		dev_set_promiscuity(dev, -1);

	return res;
}
|
|
|
|
|
2014-07-05 05:38:05 +08:00
|
|
|
/* Add @dev to @hsr as a port of the given @type (master or slave).
 * Returns 0 on success; -EBUSY if a port of that type already exists,
 * -ENOMEM on allocation failure, or the error from the slave checks /
 * port setup (reported via @extack).
 */
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type, struct netlink_ext_ack *extack)
{
	struct hsr_port *port, *master;
	int res;

	/* The master device is created by this module itself, so only
	 * slave candidates need the suitability checks.
	 */
	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev, extack);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(hsr, dev, port, extack);
		if (res)
			goto fail_dev_setup;
	}

	/* Publish the port to RCU readers, then wait so concurrent
	 * readers see a fully set-up port before we touch the master.
	 */
	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

	/* The new slave may change which features/MTU the bridge can offer */
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}
|
|
|
|
|
|
|
|
/* Remove @port from its HSR bridge and free it.
 * Teardown order matters: the port is unlinked from the RCU list first,
 * the rx_handler and upper-device link are removed for slave ports, and
 * synchronize_rcu() guarantees no reader still holds the port before
 * kfree().
 */
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	/* Slave ports carry extra state the master port does not */
	if (port != master) {
		/* Losing a slave may change the bridge's features/MTU */
		netdev_update_features(master->dev);
		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		netdev_rx_handler_unregister(port->dev);
		/* Promiscuity was only taken when forwarding is not offloaded;
		 * see hsr_portdev_setup().
		 */
		if (!port->hsr->fwd_offloaded)
			dev_set_promiscuity(port->dev, -1);
		netdev_upper_dev_unlink(port->dev, master->dev);
	}

	/* Wait for all in-flight RCU readers of the port before freeing */
	synchronize_rcu();

	kfree(port);
}
|