OpenCloudOS-Kernel/net/hsr/hsr_debugfs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * debugfs code for HSR & PRP
 * Copyright (C) 2019 Texas Instruments Incorporated
 *
 * Author(s):
 *	Murali Karicheri <m-karicheri2@ti.com>
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

static struct dentry *hsr_debugfs_root_dir;

/* hsr_node_table_show - Formats and prints node_table entries */
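/* Illustrative output for an HSR device (prot_version != PRP_V1); the MAC
 * addresses, jiffies timestamps and port value below are hypothetical:
 *
 *   Node Table entries for (HSR) device
 *   MAC-Address-A, MAC-Address-B, time_in[A], time_in[B], Address-B port, DAN-H
 *   00:11:22:33:44:55 00:11:22:33:44:66   10fe45a2,   10fe45a3,              2,     1
 */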
static int
hsr_node_table_show(struct seq_file *sfp, void *data)
{
	struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
	struct hsr_node *node;

	seq_printf(sfp, "Node Table entries for (%s) device\n",
		   (priv->prot_version == PRP_V1 ? "PRP" : "HSR"));
	seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], ");
	seq_puts(sfp, "time_in[B], Address-B port, ");
	if (priv->prot_version == PRP_V1)
		seq_puts(sfp, "SAN-A, SAN-B, DAN-P\n");
	else
		seq_puts(sfp, "DAN-H\n");

	rcu_read_lock();
Revert "net: hsr: use hlist_head instead of list_head for mac addresses" The hlist optimisation (which not only uses hlist_head instead of list_head but also splits hsr_priv::node_db into an array of 256 slots) does not consider the "node merge": Upon starting the hsr network (with three nodes) a packet that is sent from node1 to node3 will also be sent from node1 to node2 and then forwarded to node3. As a result node3 will receive 2 packets because it is not able to filter out the duplicate. Each packet received will create a new struct hsr_node with macaddress_A only set the MAC address it received from (the two MAC addesses from node1). At some point (early in the process) two supervision frames will be received from node1. They will be processed by hsr_handle_sup_frame() and one frame will leave early ("Node has already been merged") and does nothing. The other frame will be merged as portB and have its MAC address written to macaddress_B and the hsr_node (that was created for it as macaddress_A) will be removed. From now on HSR is able to identify a duplicate because both packets sent from one node will result in the same struct hsr_node because hsr_get_node() will find the MAC address either on macaddress_A or macaddress_B. Things get tricky with the optimisation: If sender's MAC address is saved as macaddress_A then the lookup will work as usual. If the MAC address has been merged into macaddress_B of another hsr_node then the lookup won't work because it is likely that the data structure is in another bucket. This results in creating a new struct hsr_node and not recognising a possible duplicate. A way around it would be to add another hsr_node::mac_list_B and attach it to the other bucket to ensure that this hsr_node will be looked up either via macaddress_A _or_ macaddress_B. I however prefer to revert it because it sounds like an academic problem rather than real life workload plus it adds complexity. I'm not an HSR expert with what is usual size of a network but I would guess 40 to 60 nodes. With 10.000 nodes and assuming 60us for pass-through (from node to node) then it would take almost 600ms for a packet to almost wrap around which sounds a lot. Revert the hash MAC addresses optimisation. Fixes: 4acc45db71158 ("net: hsr: use hlist_head instead of list_head for mac addresses") Cc: Juhee Kang <claudiajkang@gmail.com> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-11-30 00:48:08 +08:00
	list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
		/* skip self node */
		if (hsr_addr_is_self(priv, node->macaddress_A))
			continue;
		seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
		seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
		seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
		seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
		seq_printf(sfp, "%14x, ", node->addr_B_port);
Revert "net: hsr: use hlist_head instead of list_head for mac addresses" The hlist optimisation (which not only uses hlist_head instead of list_head but also splits hsr_priv::node_db into an array of 256 slots) does not consider the "node merge": Upon starting the hsr network (with three nodes) a packet that is sent from node1 to node3 will also be sent from node1 to node2 and then forwarded to node3. As a result node3 will receive 2 packets because it is not able to filter out the duplicate. Each packet received will create a new struct hsr_node with macaddress_A only set the MAC address it received from (the two MAC addesses from node1). At some point (early in the process) two supervision frames will be received from node1. They will be processed by hsr_handle_sup_frame() and one frame will leave early ("Node has already been merged") and does nothing. The other frame will be merged as portB and have its MAC address written to macaddress_B and the hsr_node (that was created for it as macaddress_A) will be removed. From now on HSR is able to identify a duplicate because both packets sent from one node will result in the same struct hsr_node because hsr_get_node() will find the MAC address either on macaddress_A or macaddress_B. Things get tricky with the optimisation: If sender's MAC address is saved as macaddress_A then the lookup will work as usual. If the MAC address has been merged into macaddress_B of another hsr_node then the lookup won't work because it is likely that the data structure is in another bucket. This results in creating a new struct hsr_node and not recognising a possible duplicate. A way around it would be to add another hsr_node::mac_list_B and attach it to the other bucket to ensure that this hsr_node will be looked up either via macaddress_A _or_ macaddress_B. I however prefer to revert it because it sounds like an academic problem rather than real life workload plus it adds complexity. I'm not an HSR expert with what is usual size of a network but I would guess 40 to 60 nodes. With 10.000 nodes and assuming 60us for pass-through (from node to node) then it would take almost 600ms for a packet to almost wrap around which sounds a lot. Revert the hash MAC addresses optimisation. Fixes: 4acc45db71158 ("net: hsr: use hlist_head instead of list_head for mac addresses") Cc: Juhee Kang <claudiajkang@gmail.com> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-11-30 00:48:08 +08:00
		if (priv->prot_version == PRP_V1)
			seq_printf(sfp, "%5x, %5x, %5x\n",
				   node->san_a, node->san_b,
				   (node->san_a == 0 && node->san_b == 0));
		else
			seq_printf(sfp, "%5x\n", 1);
	}
	rcu_read_unlock();
	return 0;
}
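
/* DEFINE_SHOW_ATTRIBUTE(hsr_node_table) generates hsr_node_table_open(),
 * which wraps hsr_node_table_show() via single_open(), and defines the
 * hsr_node_table_fops referenced when the file is created in
 * hsr_debugfs_init() below.
 */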
DEFINE_SHOW_ATTRIBUTE(hsr_node_table);
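
/* hsr_debugfs_rename - keep the per-device debugfs directory name in
 * sync with the net_device name
 *
 * Presumably invoked from the netdev rename (NETDEV_CHANGENAME) path;
 * on failure the old dentry is kept and only a warning is logged.
 */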
void hsr_debugfs_rename(struct net_device *dev)
{
	struct hsr_priv *priv = netdev_priv(dev);
	struct dentry *d;

	d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
			   hsr_debugfs_root_dir, dev->name);
	if (IS_ERR(d))
		netdev_warn(dev, "failed to rename\n");
	else
		priv->node_tbl_root = d;
}

/* hsr_debugfs_init - create hsr node_table file for dumping
 * the node table
 *
 * Description:
 * When debugfs is configured this routine sets up the node_table file per
 * hsr device for dumping the node_table entries
 */
void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
{
	struct dentry *de = NULL;

	de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
	if (IS_ERR(de)) {
		pr_err("Cannot create hsr debugfs directory\n");
		return;
	}

	priv->node_tbl_root = de;

	de = debugfs_create_file("node_table", S_IFREG | 0444,
				 priv->node_tbl_root, priv,
				 &hsr_node_table_fops);
	if (IS_ERR(de)) {
		pr_err("Cannot create hsr node_table file\n");
		debugfs_remove(priv->node_tbl_root);
		priv->node_tbl_root = NULL;
		return;
	}
}
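
/* With debugfs mounted at its conventional location, the file created
 * above can be read from user space ("hsr0" is a hypothetical device
 * name used for illustration):
 *
 *   $ mount -t debugfs none /sys/kernel/debug   # if not already mounted
 *   $ cat /sys/kernel/debug/hsr/hsr0/node_table
 */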

/* hsr_debugfs_term - Tear down debugfs infrastructure
 *
 * Description:
 * When debugfs is configured this routine removes debugfs file system
 * elements that are specific to hsr
 */
void
hsr_debugfs_term(struct hsr_priv *priv)
{
	debugfs_remove_recursive(priv->node_tbl_root);
	priv->node_tbl_root = NULL;
}
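
/* The "hsr" root directory is shared by all HSR/PRP devices. The
 * create/remove pair below is presumably called once from module
 * init/exit rather than per device.
 */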
void hsr_debugfs_create_root(void)
{
	hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
	if (IS_ERR(hsr_debugfs_root_dir)) {
		pr_err("Cannot create hsr debugfs root directory\n");
		hsr_debugfs_root_dir = NULL;
	}
}

void hsr_debugfs_remove_root(void)
{
	/* debugfs_remove() internally checks NULL and ERROR */
	debugfs_remove(hsr_debugfs_root_dir);
}