/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/net_namespace.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
#include <net/vxlan.h>
#include <net/flow_offload.h>

#include "port.h"
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "reg.h"

#define MLXSW_SP_DEFAULT_VID (VLAN_N_VID - 1)

#define MLXSW_SP_FID_8021D_MAX 1024

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"

#define MLXSW_SP_RESOURCE_NAME_SPAN "span_agents"

#define MLXSW_SP_RESOURCE_NAME_COUNTERS "counters"
#define MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW "flow"
#define MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF "rif"

enum mlxsw_sp_resource_id {
	MLXSW_SP_RESOURCE_KVD = 1,
	MLXSW_SP_RESOURCE_KVD_LINEAR,
	MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
	MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
	MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
	MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
	MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
	MLXSW_SP_RESOURCE_SPAN,
	MLXSW_SP_RESOURCE_COUNTERS,
	MLXSW_SP_RESOURCE_COUNTERS_FLOW,
	MLXSW_SP_RESOURCE_COUNTERS_RIF,
	MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
	MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
};
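
/* The identifiers and names above describe the devlink resource tree of the
 * device. An illustrative, abbreviated sketch of how this is expected to
 * surface to the user (the PCI address is a made-up example; the linear size
 * is the MLXSW_SP_KVD_LINEAR_SIZE default defined above):
 *
 *	$ devlink resource show pci/0000:01:00.0
 *	  name kvd ...
 *	    name linear size 98304 ...
 *	      name singles ...  name chunks ...  name large_chunks ...
 *	    name hash_single ...
 *	    name hash_double ...
 */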

struct mlxsw_sp_port;
struct mlxsw_sp_rif;
struct mlxsw_sp_span_entry;
enum mlxsw_sp_l3proto;
union mlxsw_sp_l3addr;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

enum mlxsw_sp_rif_type {
	MLXSW_SP_RIF_TYPE_SUBPORT,
	MLXSW_SP_RIF_TYPE_VLAN,
	MLXSW_SP_RIF_TYPE_FID,
	MLXSW_SP_RIF_TYPE_IPIP_LB, /* IP-in-IP loopback. */
	MLXSW_SP_RIF_TYPE_MAX,
};

struct mlxsw_sp_rif_ops;

extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[];
extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[];

enum mlxsw_sp_fid_type {
	MLXSW_SP_FID_TYPE_8021Q,
	MLXSW_SP_FID_TYPE_8021D,
	MLXSW_SP_FID_TYPE_RFID,
	MLXSW_SP_FID_TYPE_DUMMY,
	MLXSW_SP_FID_TYPE_MAX,
};

enum mlxsw_sp_nve_type {
	MLXSW_SP_NVE_TYPE_VXLAN,
};

struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 fid;
	u16 mid;
	bool in_hw;
	unsigned long *ports_in_mid; /* bits array */
};
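
/* Illustrative sketch only: ports_in_mid is a bitmap keyed by local port, so
 * membership updates are plain bitmap operations, e.g.:
 *
 *	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
 *	...
 *	if (bitmap_empty(mid->ports_in_mid,
 *			 mlxsw_core_max_ports(mlxsw_sp->core)))
 *		... the MID has no member ports left ...
 */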

struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
struct mlxsw_sp_mr;
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
struct mlxsw_sp_nve;
struct mlxsw_sp_kvdl_ops;
struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_rulei_ops;
struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_nve_ops;
struct mlxsw_sp_sb_ops;
struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;

struct mlxsw_sp_port_mapping {
	u8 module;
	u8 width;
	u8 lane;
};

struct mlxsw_sp {
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	const unsigned char *mac_mask;
	struct mlxsw_sp_upper *lags;
	struct mlxsw_sp_port_mapping **port_mapping;
	struct mlxsw_sp_sb *sb;
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_mr *mr;
	struct mlxsw_afa *afa;
	struct mlxsw_sp_acl *acl;
	struct mlxsw_sp_fid_core *fid_core;
	struct mlxsw_sp_policer_core *policer_core;
	struct mlxsw_sp_kvdl *kvdl;
	struct mlxsw_sp_nve *nve;
	struct notifier_block netdevice_nb;
	struct mlxsw_sp_ptp_clock *clock;
	struct mlxsw_sp_ptp_state *ptp_state;
	struct mlxsw_sp_counter_pool *counter_pool;
	struct mlxsw_sp_span *span;
	struct mlxsw_sp_trap *trap;
	const struct mlxsw_sp_kvdl_ops *kvdl_ops;
	const struct mlxsw_afa_ops *afa_ops;
	const struct mlxsw_afk_ops *afk_ops;
	const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
	const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops;
	const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
	const struct mlxsw_sp_nve_ops **nve_ops_arr;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_sb_vals *sb_vals;
	const struct mlxsw_sp_sb_ops *sb_ops;
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	const struct mlxsw_sp_ptp_ops *ptp_ops;
	const struct mlxsw_sp_span_ops *span_ops;
	const struct mlxsw_sp_policer_core_ops *policer_core_ops;
	const struct mlxsw_sp_trap_ops *trap_ops;
	const struct mlxsw_listener *listeners;
	size_t listeners_count;
	u32 lowest_shaper_bs;
};

struct mlxsw_sp_ptp_ops {
	struct mlxsw_sp_ptp_clock *
		(*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
	void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);

	struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
	void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);

	/* Notify the driver that a packet that might be PTP was received.
	 * The driver is responsible for freeing the passed-in SKB.
	 */
	void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			u8 local_port);

	/* Notify the driver that a timestamped packet was transmitted. The
	 * driver is responsible for freeing the passed-in SKB.
	 */
	void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			    u8 local_port);

	int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct hwtstamp_config *config);
	void (*shaper_work)(struct work_struct *work);
	int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
			   struct ethtool_ts_info *info);
	int (*get_stats_count)(void);
	void (*get_stats_strings)(u8 **p);
	void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  u64 *data, int data_index);
};
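
/* Illustrative sketch (assumed function name, not part of this header): on an
 * ASIC that needs no deferred timestamp matching, ->receive can simply hand
 * the SKB back to the regular receive path:
 *
 *	static void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp,
 *					  struct sk_buff *skb, u8 local_port)
 *	{
 *		mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
 *	}
 *
 * The real per-ASIC implementations live in spectrum_ptp.c.
 */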

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct mlxsw_sp_port_sample {
	struct psample_group *psample_group;
	u32 trunc_size;
	u32 rate;
	bool truncate;
};

struct mlxsw_sp_bridge_port;
struct mlxsw_sp_fid;

struct mlxsw_sp_port_vlan {
	struct list_head list;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct list_head bridge_vlan_node;
};

/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
	u64 ecn;
	u64 wred_drop[TC_MAX_QUEUE];
	u64 tail_drop[TC_MAX_QUEUE];
	u64 backlog[TC_MAX_QUEUE];
	u64 tx_bytes[IEEE_8021QAZ_MAX_TCS];
	u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
};

struct mlxsw_sp_ptp_port_dir_stats {
	u64 packets;
	u64 timestamps;
};

struct mlxsw_sp_ptp_port_stats {
	struct mlxsw_sp_ptp_port_dir_stats rx_gcd;
	struct mlxsw_sp_ptp_port_dir_stats tx_gcd;
};

struct mlxsw_sp_port {
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 lagged:1,
	   split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		u8 tx_pause:1,
		   rx_pause:1,
		   autoneg:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
		enum mlxsw_reg_qpts_trust_state trust_state;
	} dcb;
	struct mlxsw_sp_port_mapping mapping; /* mapping is constant during the
					       * mlxsw_sp_port lifetime, however
					       * the same local port can have a
					       * different mapping.
					       */
	struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
		struct rtnl_link_stats64 stats;
		struct mlxsw_sp_port_xstats xstats;
		struct delayed_work update_dw;
	} periodic_hw_stats;
	struct mlxsw_sp_port_sample __rcu *sample;
	struct list_head vlans_list;
	struct mlxsw_sp_port_vlan *default_vlan;
	struct mlxsw_sp_qdisc_state *qdisc;
	unsigned acl_rule_count;
	struct mlxsw_sp_flow_block *ing_flow_block;
	struct mlxsw_sp_flow_block *eg_flow_block;
	struct {
		struct delayed_work shaper_dw;
		struct hwtstamp_config hwtstamp_config;
		u16 ing_types;
		u16 egr_types;
		struct mlxsw_sp_ptp_port_stats stats;
	} ptp;
	u8 split_base_local_port;
	int max_mtu;
	u32 max_speed;
	struct mlxsw_sp_hdroom *hdroom;
	u64 module_overheat_initial_val;
};

struct mlxsw_sp_port_type_speed_ops {
	void (*from_ptys_supported_port)(struct mlxsw_sp *mlxsw_sp,
					 u32 ptys_eth_proto,
					 struct ethtool_link_ksettings *cmd);
	void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
			       u8 width, unsigned long *mode);
	u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto);
	void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
				       bool carrier_ok, u32 ptys_eth_proto,
				       struct ethtool_link_ksettings *cmd);
	int (*ptys_max_speed)(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed);
	u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
				   const struct ethtool_link_ksettings *cmd);
	u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
	void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
				  u8 local_port, u32 proto_admin, bool autoneg);
	void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
				    u32 *p_eth_proto_cap,
				    u32 *p_eth_proto_admin,
				    u32 *p_eth_proto_oper);
	u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
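
/* Illustrative dispatch sketch: callers are expected to go through the
 * per-ASIC ops attached to the mlxsw_sp instance, e.g.:
 *
 *	const struct mlxsw_sp_port_type_speed_ops *ops =
 *		mlxsw_sp->port_type_speed_ops;
 *	u32 speed = ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
 *
 * where eth_proto_oper was previously unpacked from a PTYS register payload
 * via ops->reg_ptys_eth_unpack().
 */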

static inline struct net_device *
mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			return dev;
	}

	return NULL;
}

static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev)
{
	return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev);
}
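
/* Usage sketch (illustrative, assumed context): these helpers let callers
 * gate VXLAN-specific offload handling on whether a bridge has a VXLAN lower
 * device at all:
 *
 *	if (mlxsw_sp_bridge_has_vxlan(br_dev)) {
 *		struct net_device *vxlan_dev;
 *
 *		vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(br_dev);
 *		... offload the VNI of vxlan_dev ...
 *	}
 */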

static inline int
mlxsw_sp_vxlan_mapped_vid(const struct net_device *vxlan_dev, u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	u16 vid = 0;
	int err;

	err = br_vlan_get_pvid(vxlan_dev, &vid);
	if (err || !vid)
		goto out;

	err = br_vlan_get_info(vxlan_dev, vid, &vinfo);
	if (err || !(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
		vid = 0;

out:
	*p_vid = vid;
	return err;
}
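
/* Semantics sketch: on success *p_vid holds the PVID of the VXLAN bridge
 * port, but only if that PVID is also egress-untagged; otherwise it is 0.
 * An illustrative caller:
 *
 *	u16 vid;
 *
 *	if (!mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid) && vid)
 *		... the tunnel traffic maps to VLAN "vid" ...
 */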

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
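
/* Usage sketch (illustrative): resolving the member ports of a LAG by index.
 * A NULL result means the slot is unused or the port is no longer lagged:
 *
 *	struct mlxsw_sp_port *member;
 *
 *	member = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, port_index);
 *	if (member)
 *		... member belongs to lag_id ...
 */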

static inline struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
			       u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (mlxsw_sp_port_vlan->vid == vid)
			return mlxsw_sp_port_vlan;
	}

	return NULL;
}
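
/* Usage sketch (illustrative): a common pattern is to look up the Port-VLAN
 * of the port's PVID before touching its FID or bridge state:
 *
 *	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 *
 *	mlxsw_sp_port_vlan =
 *		mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
 *					       mlxsw_sp_port->pvid);
 *	if (!mlxsw_sp_port_vlan)
 *		return -EINVAL;
 */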

enum mlxsw_sp_flood_type {
	MLXSW_SP_FLOOD_TYPE_UC,
	MLXSW_SP_FLOOD_TYPE_BC,
	MLXSW_SP_FLOOD_TYPE_MC,
};

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl);
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up);
int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged);

/* spectrum_buffers.c */
struct mlxsw_sp_hdroom_prio {
	/* Index of the port buffer associated with this priority. This is
	 * the value that is actually configured.
	 */
	u8 buf_idx;
	/* Value of buf_idx deduced from the DCB ETS configuration. */
	u8 ets_buf_idx;
	/* Value of buf_idx taken from the dcbnl_setbuffer configuration. */
	u8 set_buf_idx;
	bool lossy;
};

struct mlxsw_sp_hdroom_buf {
	u32 thres_cells;
	u32 size_cells;
	/* Size requirement from dcbnl_setbuffer. */
	u32 set_size_cells;
	bool lossy;
};

enum mlxsw_sp_hdroom_mode {
	MLXSW_SP_HDROOM_MODE_DCB,
	MLXSW_SP_HDROOM_MODE_TC,
};

#define MLXSW_SP_PB_COUNT 10

struct mlxsw_sp_hdroom {
	enum mlxsw_sp_hdroom_mode mode;

	struct {
		struct mlxsw_sp_hdroom_prio prio[IEEE_8021Q_MAX_PRIORITIES];
	} prios;
	struct {
		struct mlxsw_sp_hdroom_buf buf[MLXSW_SP_PB_COUNT];
	} bufs;
	struct {
		/* Size actually configured for the internal buffer. Equal to
		 * reserve when internal buffer is enabled.
		 */
		u32 size_cells;
		/* Space reserved in the headroom for the internal buffer. Port
		 * buffers are not allowed to grow into this space.
		 */
		u32 reserve_cells;
		bool enable;
	} int_buf;
	int delay_bytes;
	int mtu;
};
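
/* Usage sketch (illustrative): the headroom is reconfigured by copying the
 * current configuration, adjusting it, and applying it back. For example,
 * after an MTU change, assuming the helpers declared further down behave as
 * their names suggest:
 *
 *	struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
 *
 *	hdroom.mtu = new_mtu;
 *	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
 *	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
 */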

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type,
			 struct netlink_ext_ack *extack);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold, struct netlink_ext_ack *extack);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold,
				 struct netlink_ext_ack *extack);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom);
void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom);
void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_hdroom *hdroom);
int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
			      const struct mlxsw_sp_hdroom *hdroom);
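
/* Conversion sketch (illustrative): shared-buffer quantities are expressed
 * in cells, whose byte size is ASIC-specific, so byte values coming from the
 * user are converted before being programmed, e.g.:
 *
 *	hdroom->bufs.buf[i].set_size_cells =
 *		mlxsw_sp_bytes_cells(mlxsw_sp, size_bytes);
 */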

extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;

extern const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops;
extern const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops;
extern const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops;

/* spectrum_switchdev.c */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack);
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev);
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
					 const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
|
mlxsw: spectrum: Replace vPorts with Port-VLAN
As explained in the cover letter, since the introduction of the bridge
offload in the mlxsw driver, information related to the offloaded bridge
and bridge ports was stored in the individual port struct,
mlxsw_sp_port.
This lead to a bloated struct storing both physical properties of the
port (e.g., autoneg status) as well as logical properties of an upper
bridge port (e.g., learning, mrouter indication). While this might work
well for simple devices, it proved to be hard to extend when stacked
devices were taken into account and more advanced use-cases (e.g., IGMP
snooping) considered.
This patch removes the excess information from the above struct and
instead stores it in more appropriate structs that represent the bridge
port, the bridge itself and a VLAN configured on the bridge port.
The membership of a port in a bridge is denoted using the Port-VLAN
struct, which points to the bridge port and also member in the bridge
VLAN group of the VLAN it represents. This allows us to completely
remove the vPort abstraction and consolidate many of the code paths
relating to VLAN-aware and unaware bridges.
Note that the FID / vFID code is currently duplicated, but this will
soon go away when the common FID core will be introduced.
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-05-26 14:37:31 +08:00
|
|
|
|
2017-06-04 22:53:38 +08:00
|
|
|
/* spectrum.c */
|
mlxsw: spectrum: PTP: Hook into packet receive path
When configured, the Spectrum hardware can recognize PTP packets and
trap them to the CPU using dedicated traps, PTP0 and PTP1.
One reason to get PTP packets under dedicated traps is to have a
separate policer suitable for the amount of PTP traffic expected when
switch is operated as a boundary clock. For this, add two new trap
groups, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0 and _PTP1, and associate the
two PTP traps with these two groups.
In the driver, specifically for Spectrum-1, event PTP packets will need
to be paired up with their timestamps. Those arrive through a different
set of traps, added later in the patch set. To support this future use,
introduce a new PTP op, ptp_receive.
It is possible to configure which PTP messages should be trapped under
which PTP trap. On Spectrum systems, we will use PTP0 for event
packets (which need timestamping), and PTP1 for control packets (which
do not). Thus configure PTP0 trap with a custom callback that defers to
the ptp_receive op.
Additionally, L2 PTP packets are actually trapped through the LLDP trap,
not through any of the PTP traps. So treat the LLDP trap the same way as
the PTP0 trap. Unlike PTP traps, which are currently still disabled,
LLDP trap is active. Correspondingly, have all the implementations of
the ptp_receive op return true, which the handler treats as a signal to
forward the packet immediately.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
                                       u8 local_port, void *priv);
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
                          u8 local_port);
void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
                             u8 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
                          bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
                              u8 switch_prio, u8 tclass);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                  enum mlxsw_reg_qeec_hr hr, u8 index,
                                  u8 next_index, u32 maxrate, u8 burst_size);
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                              u8 state);
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                                   bool learn_enable);
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                           u16 ethtype);
mlxsw: spectrum: Replace vPorts with Port-VLAN
As explained in the cover letter, since the introduction of the bridge
offload in the mlxsw driver, information related to the offloaded bridge
and bridge ports was stored in the individual port struct,
mlxsw_sp_port.
This led to a bloated struct storing both physical properties of the
port (e.g., autoneg status) as well as logical properties of an upper
bridge port (e.g., learning, mrouter indication). While this might work
well for simple devices, it proved to be hard to extend when stacked
devices were taken into account and more advanced use-cases (e.g., IGMP
snooping) considered.
This patch removes the excess information from the above struct and
instead stores it in more appropriate structs that represent the bridge
port, the bridge itself and a VLAN configured on the bridge port.
The membership of a port in a bridge is denoted using the Port-VLAN
struct, which points to the bridge port and is also a member in the bridge
VLAN group of the VLAN it represents. This allows us to completely
remove the vPort abstraction and consolidate many of the code paths
relating to VLAN-aware and unaware bridges.
Note that the FID / vFID code is currently duplicated, but this will
soon go away when the common FID core is introduced.
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
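The split the message describes can be pictured as three small structs:
physical state stays on the port, logical state moves to the bridge-port and
bridge structs, and the Port-VLAN ties a {port, VID} pair to them. The
following is a hypothetical userspace model, not the driver's definitions:

#include <stdio.h>

struct bridge_device { int vlan_aware; };

struct bridge_port {
        struct bridge_device *bridge;    /* the bridge this port belongs to */
        int learning;                    /* logical, per-bridge-port state */
};

struct port_vlan {
        int vid;
        struct bridge_port *bridge_port; /* set while member in a bridge; the
                                          * entry is also linked into that
                                          * bridge's VLAN group */
};

struct phys_port {
        int autoneg;                     /* physical state stays here */
        struct port_vlan vlans[8];       /* one entry per configured VID */
};

int main(void)
{
        struct bridge_device br = { .vlan_aware = 1 };
        struct bridge_port brport = { .bridge = &br, .learning = 1 };
        struct phys_port swp1 = { .autoneg = 1 };

        swp1.vlans[0].vid = 10;
        swp1.vlans[0].bridge_port = &brport; /* {swp1, 10} joins the bridge */
        printf("vid %d bridged: %s\n", swp1.vlans[0].vid,
               swp1.vlans[0].bridge_port ? "yes" : "no");
        return 0;
}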
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
                              unsigned int counter_index, u64 *packets,
                              u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                                unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
                                unsigned int counter_index);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
#else
static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        return 0;
}
static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}
#endif

/* spectrum_router.c */
enum mlxsw_sp_l3proto {
        MLXSW_SP_L3_PROTO_IPV4,
        MLXSW_SP_L3_PROTO_IPV6,
#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
};

union mlxsw_sp_l3addr {
        __be32 addr4;
        struct in6_addr addr6;
};

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
                         struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
                                         unsigned long event, void *ptr);
void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
                              const struct net_device *macvlan_dev);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
                                  unsigned long event, void *ptr);
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
                                 struct netdev_notifier_changeupper_info *info);
bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
                                const struct net_device *dev);
bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
                                const struct net_device *dev);
int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
                                     struct net_device *l3_dev,
                                     unsigned long event,
                                     struct netdev_notifier_info *info);
int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
                                 struct net_device *l3_dev,
                                 unsigned long event,
                                 struct netdev_notifier_info *info);
int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
                               struct net_device *l3_dev,
                               struct netlink_ext_ack *extack);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
                                 struct net_device *dev);
bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *dev);
u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
                                      enum mlxsw_sp_l3proto ul_proto,
                                      const union mlxsw_sp_l3addr *ul_sip,
                                      u32 tunnel_index);
void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
                                      enum mlxsw_sp_l3proto ul_proto,
                                      const union mlxsw_sp_l3addr *ul_sip);
int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
                                u16 *vr_id);
int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
                               u16 *ul_rif_index);
void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index);

/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
        MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
        MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
        MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
        MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
        MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};

static inline unsigned int
mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
{
        switch (type) {
        case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ:
        case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
        case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
        case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
        case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
        default:
                return 1;
        }
}

struct mlxsw_sp_kvdl_ops {
        size_t priv_size;
        int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
        void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
        int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
                     enum mlxsw_sp_kvdl_entry_type type,
                     unsigned int entry_count, u32 *p_entry_index);
        void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
                     enum mlxsw_sp_kvdl_entry_type type,
                     unsigned int entry_count, int entry_index);
        int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
                                enum mlxsw_sp_kvdl_entry_type type,
                                unsigned int entry_count,
                                unsigned int *p_alloc_count);
        int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
};

int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
                        enum mlxsw_sp_kvdl_entry_type type,
                        unsigned int entry_count, u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
                        enum mlxsw_sp_kvdl_entry_type type,
                        unsigned int entry_count, int entry_index);
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
                                    enum mlxsw_sp_kvdl_entry_type type,
                                    unsigned int entry_count,
                                    unsigned int *p_alloc_count);

/* spectrum1_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);

/* spectrum2_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
struct mlxsw_sp_acl_rule_info {
        unsigned int priority;
        struct mlxsw_afk_element_values values;
        struct mlxsw_afa_block *act_block;
        u8 action_created:1,
           ingress_bind_blocker:1,
           egress_bind_blocker:1,
           counter_valid:1,
           policer_index_valid:1;
        unsigned int counter_index;
        u16 policer_index;
};

/* spectrum_flow.c */
struct mlxsw_sp_flow_block {
        struct list_head binding_list;
        struct {
                struct list_head list;
                unsigned int min_prio;
                unsigned int max_prio;
        } mall;
        struct mlxsw_sp_acl_ruleset *ruleset_zero;
        struct mlxsw_sp *mlxsw_sp;
        unsigned int rule_count;
        unsigned int disable_count;
        unsigned int ingress_blocker_rule_count;
        unsigned int egress_blocker_rule_count;
        unsigned int ingress_binding_count;
        unsigned int egress_binding_count;
        struct net *net;
};

struct mlxsw_sp_flow_block_binding {
        struct list_head list;
        struct mlxsw_sp_port *mlxsw_sp_port;
        bool ingress;
};

static inline struct mlxsw_sp *
mlxsw_sp_flow_block_mlxsw_sp(struct mlxsw_sp_flow_block *block)
{
        return block->mlxsw_sp;
}

static inline unsigned int
mlxsw_sp_flow_block_rule_count(const struct mlxsw_sp_flow_block *block)
{
        return block ? block->rule_count : 0;
}

static inline void
mlxsw_sp_flow_block_disable_inc(struct mlxsw_sp_flow_block *block)
{
        if (block)
                block->disable_count++;
}

static inline void
mlxsw_sp_flow_block_disable_dec(struct mlxsw_sp_flow_block *block)
{
        if (block)
                block->disable_count--;
}

static inline bool
mlxsw_sp_flow_block_disabled(const struct mlxsw_sp_flow_block *block)
{
        return block->disable_count;
}

static inline bool
mlxsw_sp_flow_block_is_egress_bound(const struct mlxsw_sp_flow_block *block)
{
        return block->egress_binding_count;
}

static inline bool
mlxsw_sp_flow_block_is_ingress_bound(const struct mlxsw_sp_flow_block *block)
{
        return block->ingress_binding_count;
}

static inline bool
mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
{
        return block->ingress_binding_count && block->egress_binding_count;
}

struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
                                                       struct net *net);
void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct flow_block_offload *f,
                                   bool ingress);

/* spectrum_acl.c */
struct mlxsw_sp_acl_ruleset;

enum mlxsw_sp_acl_profile {
        MLXSW_SP_ACL_PROFILE_FLOWER,
        MLXSW_SP_ACL_PROFILE_MR,
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);

int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_flow_block *block,
                              struct mlxsw_sp_flow_block_binding *binding);
void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_flow_block *block,
                                 struct mlxsw_sp_flow_block_binding *binding);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_flow_block *block, u32 chain_index,
                            enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_flow_block *block, u32 chain_index,
                         enum mlxsw_sp_acl_profile profile,
                         struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
                                   unsigned int *p_min_prio,
                                   unsigned int *p_max_prio);

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
                          struct mlxsw_afa_block *afa_block);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
                                 unsigned int priority);
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    u32 key_value, u32 mask_value);
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    const char *key_value,
                                    const char *mask_value, unsigned int len);
int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
                                u16 group_id);
int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
                                bool ingress,
                                const struct flow_action_cookie *fa_cookie,
                                struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_rule_info *rulei,
                                  struct mlxsw_sp_flow_block *block,
                                  struct net_device *out_dev,
                                  struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule_info *rulei,
                               struct net_device *out_dev,
                               struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule_info *rulei,
                                u32 action, u16 vid, u16 proto, u8 prio,
                                struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
                                    struct mlxsw_sp_acl_rule_info *rulei,
                                    u32 prio, struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_rule_info *rulei,
                                  enum flow_action_mangle_base htype,
                                  u32 offset, u32 mask, u32 val,
                                  struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_rule_info *rulei,
                                  u32 index, u64 rate_bytes_ps,
                                  u32 burst, struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule_info *rulei,
                                 struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_rule_info *rulei,
                                   u16 fid, struct netlink_ext_ack *extack);

struct mlxsw_sp_acl_rule;

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie,
                         struct mlxsw_afa_block *afa_block,
                         struct netlink_ext_ack *extack);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_rule *rule,
                                     struct mlxsw_afa_block *afa_block);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule *rule,
                                u64 *packets, u64 *bytes, u64 *drops,
                                u64 *last_use,
                                enum flow_action_hw_stats *used_hw_stats);

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);

static inline const struct flow_action_cookie *
mlxsw_sp_acl_act_cookie_lookup(struct mlxsw_sp *mlxsw_sp, u32 cookie_index)
{
        return mlxsw_afa_cookie_lookup(mlxsw_sp->afa, cookie_index);
}

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val);

struct mlxsw_sp_acl_mangle_action;

struct mlxsw_sp_acl_rulei_ops {
        int (*act_mangle_field)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei,
                                struct mlxsw_sp_acl_mangle_action *mact, u32 val,
                                struct netlink_ext_ack *extack);
};

extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops;
extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops;

/* spectrum_acl_tcam.c */
struct mlxsw_sp_acl_tcam;
struct mlxsw_sp_acl_tcam_region;

struct mlxsw_sp_acl_tcam_ops {
        enum mlxsw_reg_ptar_key_type key_type;
        size_t priv_size;
        int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
                    struct mlxsw_sp_acl_tcam *tcam);
        void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
        size_t region_priv_size;
        int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
                           void *tcam_priv,
                           struct mlxsw_sp_acl_tcam_region *region,
                           void *hints_priv);
        void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
        int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_region *region);
        void * (*region_rehash_hints_get)(void *region_priv);
        void (*region_rehash_hints_put)(void *hints_priv);
        size_t chunk_priv_size;
        void (*chunk_init)(void *region_priv, void *chunk_priv,
                           unsigned int priority);
        void (*chunk_fini)(void *chunk_priv);
        size_t entry_priv_size;
        int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
                         void *region_priv, void *chunk_priv,
                         void *entry_priv,
                         struct mlxsw_sp_acl_rule_info *rulei);
        void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
                          void *region_priv, void *chunk_priv,
                          void *entry_priv);
        int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp,
                                    void *region_priv, void *entry_priv,
                                    struct mlxsw_sp_acl_rule_info *rulei);
        int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
                                  void *region_priv, void *entry_priv,
                                  bool *activity);
};

/* spectrum1_acl_tcam.c */
extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;

/* spectrum2_acl_tcam.c */
extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops;

/* spectrum_acl_flex_actions.c */
extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;

/* spectrum_acl_flex_keys.c */
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;

/* spectrum_matchall.c */
enum mlxsw_sp_mall_action_type {
        MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
        MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
        MLXSW_SP_MALL_ACTION_TYPE_TRAP,
};

struct mlxsw_sp_mall_mirror_entry {
        const struct net_device *to_dev;
        int span_id;
};

struct mlxsw_sp_mall_trap_entry {
        int span_id;
};

struct mlxsw_sp_mall_entry {
        struct list_head list;
        unsigned long cookie;
        unsigned int priority;
        enum mlxsw_sp_mall_action_type type;
        bool ingress;
        union {
                struct mlxsw_sp_mall_mirror_entry mirror;
                struct mlxsw_sp_mall_trap_entry trap;
                struct mlxsw_sp_port_sample sample;
        };
        struct rcu_head rcu;
};

int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_flow_block *block,
                          struct tc_cls_matchall_offload *f);
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
                           struct tc_cls_matchall_offload *f);
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
                            struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
                               struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
                           unsigned int *p_min_prio, unsigned int *p_max_prio);

/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_flow_block *block,
                            struct flow_cls_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_flow_block *block,
                             struct flow_cls_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_flow_block *block,
                          struct flow_cls_offload *f);
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_flow_block *block,
                                 struct flow_cls_offload *f);
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_flow_block *block,
                                   struct flow_cls_offload *f);
int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_flow_block *block,
                             u32 chain_index, unsigned int *p_min_prio,
                             unsigned int *p_max_prio);

/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct tc_red_qopt_offload *p);
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct tc_prio_qopt_offload *p);
int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct tc_ets_qopt_offload *p);
int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct tc_tbf_qopt_offload *p);
int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct tc_fifo_qopt_offload *p);
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
                                              struct flow_block_offload *f);

/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
                                                  u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid,
                          enum mlxsw_sp_nve_type *p_type);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
                                                __be32 vni);
int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
                                     u32 nve_flood_index);
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
                         __be32 vni, int nve_ifindex);
void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
                                    const struct net_device *nve_dev);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
                           enum mlxsw_sp_flood_type packet_type, u8 local_port,
                           bool member);
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
                              struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
                                 struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
mlxsw: spectrum_router: Do not destroy RIFs based on FID's reference count
Currently, when a RIF is constructed on top of a FID, the RIF increments
the FID's reference count and the RIF is destroyed when the FID's
reference count drops to 1. This effectively means that when no local
ports are member in the FID, the FID is destroyed regardless if the
router port is a member in the FID or not.
The above can lead to unexpected behavior in which routes using a
VLAN interface as their nexthop device are no longer offloaded after the
last local port leaves the corresponding VLAN (FID).
Example:
# ip -4 route show dev br0.10
192.0.2.0/24 proto kernel scope link src 192.0.2.1 offload
# bridge vlan del vid 10 dev swp3
# ip -4 route show dev br0.10
192.0.2.0/24 proto kernel scope link src 192.0.2.1
After the patch, the route is offloaded before and after the VLAN is
removed from local port 'swp3', as the RIF corresponding to 'br0.10'
continues to exist.
In order to remove RIFs' reliance on the underlying FID's reference
count, we need to add a reference count to sub-port RIFs, which are RIFs
that correspond to physical ports and their uppers (e.g., LAG devices).
In this case, each {Port, VID} ('struct mlxsw_sp_port_vlan') needs to
hold a reference on the RIF. For example:
   bond0.10
      |
    bond0
      |
  +---+---+
  |       |
 swp1    swp2
Both {Port 1, VID 10} and {Port 2, VID 10} will hold a reference on the
RIF corresponding to 'bond0.10'. When the last reference is dropped, the
RIF will be destroyed.
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Reviewed-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
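The lifetime rule the patch introduces is plain reference counting on the RIF
itself. The following is a hedged userspace sketch of that get/put discipline,
with made-up names (rif_get/rif_put) rather than the driver's functions:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical model of a sub-port RIF whose lifetime is governed by its
 * own reference count rather than the underlying FID's. */
struct rif {
        int ref_count;
        const char *dev;
};

static struct rif *rif_get(struct rif *rif)
{
        rif->ref_count++;
        return rif;
}

static void rif_put(struct rif *rif)
{
        if (--rif->ref_count == 0) {
                printf("destroying RIF for %s\n", rif->dev);
                free(rif);
        }
}

int main(void)
{
        struct rif *rif = calloc(1, sizeof(*rif));

        rif->dev = "bond0.10";
        rif_get(rif);   /* {swp1, 10} joins the router through bond0.10 */
        rif_get(rif);   /* {swp2, 10} joins as well */
        rif_put(rif);   /* swp1 leaves; the RIF survives */
        rif_put(rif);   /* swp2 leaves; the last put destroys the RIF */
        return 0;
}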
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
                           enum mlxsw_sp_fid_type type);
u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
                                            int br_ifindex);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp,
                                               u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
                                               int br_ifindex);
struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
                                           u16 rif_index);
struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);

/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
        MLXSW_SP_MR_ROUTE_PRIO_SG,
        MLXSW_SP_MR_ROUTE_PRIO_STARG,
        MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
        __MLXSW_SP_MR_ROUTE_PRIO_MAX
};

#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)

struct mlxsw_sp_mr_route_key;

struct mlxsw_sp_mr_tcam_ops {
        size_t priv_size;
        int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
        void (*fini)(void *priv);
        size_t route_priv_size;
        int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
                            void *route_priv,
                            struct mlxsw_sp_mr_route_key *key,
                            struct mlxsw_afa_block *afa_block,
                            enum mlxsw_sp_mr_route_prio prio);
        void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
                              void *route_priv,
                              struct mlxsw_sp_mr_route_key *key);
        int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
                            struct mlxsw_sp_mr_route_key *key,
                            struct mlxsw_afa_block *afa_block);
};

/* spectrum1_mr_tcam.c */
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;

/* spectrum2_mr_tcam.c */
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;

/* spectrum_nve.c */
struct mlxsw_sp_nve_params {
        enum mlxsw_sp_nve_type type;
        __be32 vni;
        const struct net_device *dev;
        u16 ethertype;
};

extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];

int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
                                    enum mlxsw_sp_l3proto proto,
                                    union mlxsw_sp_l3addr *addr);
int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_fid *fid,
                              enum mlxsw_sp_l3proto proto,
                              union mlxsw_sp_l3addr *addr);
void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_fid *fid,
                               enum mlxsw_sp_l3proto proto,
                               union mlxsw_sp_l3addr *addr);
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
                            struct mlxsw_sp_nve_params *params,
                            struct netlink_ext_ack *extack);
void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_fid *fid);
int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
mlxsw: spectrum_ptp: Increase parsing depth when PTP is enabled
Spectrum systems have a configurable limit on how far into the packet they
parse. By default, the limit is 96 bytes.
An IPv6 PTP packet is layered as Ethernet/IPv6/UDP (14+40+8 bytes), and
sequence ID of a PTP event is only available 32 bytes into payload, for a
total of 94 bytes. When an additional 802.1q header is present as
well (such as when ptp4l is running on a VLAN port), the parsing limit is
exceeded. Such packets are not recognized as PTP, and are not timestamped.
Therefore generalize the current VXLAN-specific parsing depth setting to
allow reference-counted requests from other modules as well. Keep it in the
VXLAN module, because the MPRS register also configures UDP destination
port number used for VXLAN, and is thus closely tied to the VXLAN code
anyway.
Then invoke the new interfaces both from VXLAN (in the obvious places) and
from the PTP code, when the (global) timestamping configuration changes from
disabled to enabled or vice versa.
Fixes: 8748642751ed ("mlxsw: spectrum: PTP: Support SIOCGHWTSTAMP, SIOCSHWTSTAMP ioctls")
Signed-off-by: Petr Machata <petrm@mellanox.com>
Reviewed-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
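The generalized interface boils down to a reference-counted toggle: the first
get raises the parsing depth, the last put restores the default. A minimal
userspace sketch of that pattern follows, assuming the 96-byte default stated
above; the increased depth value is a made-up placeholder, and mprs_write
stands in for the real MPRS register access:

#include <stdio.h>

#define PARSING_DEPTH_DEFAULT 96
#define PARSING_DEPTH_INC     128   /* assumed: enough for VLAN+IPv6 PTP */

static unsigned int parsing_depth_ref; /* callers needing the deeper parse */

/* Stand-in for writing the MPRS register on the device. */
static void mprs_write(unsigned int depth)
{
        printf("parsing depth set to %u bytes\n", depth);
}

static void inc_parsing_depth_get(void)
{
        if (parsing_depth_ref++ == 0)  /* first user: bump the depth */
                mprs_write(PARSING_DEPTH_INC);
}

static void inc_parsing_depth_put(void)
{
        if (--parsing_depth_ref == 0)  /* last user: restore the default */
                mprs_write(PARSING_DEPTH_DEFAULT);
}

int main(void)
{
        inc_parsing_depth_get();  /* e.g. PTP timestamping enabled */
        inc_parsing_depth_get();  /* e.g. a VXLAN tunnel configured */
        inc_parsing_depth_put();  /* PTP disabled; VXLAN still needs it */
        inc_parsing_depth_put();  /* back to the 96-byte default */
        return 0;
}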
/* spectrum_nve_vxlan.c */
int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);

/* spectrum_trap.c */
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
                       const struct devlink_trap *trap, void *trap_ctx);
void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
                        const struct devlink_trap *trap, void *trap_ctx);
int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
                             const struct devlink_trap *trap,
                             enum devlink_trap_action action,
                             struct netlink_ext_ack *extack);
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
                             const struct devlink_trap_group *group);
int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
                            const struct devlink_trap_group *group,
                            const struct devlink_trap_policer *policer,
                            struct netlink_ext_ack *extack);
int
mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
                           const struct devlink_trap_policer *policer);
void mlxsw_sp_trap_policer_fini(struct mlxsw_core *mlxsw_core,
                                const struct devlink_trap_policer *policer);
int
mlxsw_sp_trap_policer_set(struct mlxsw_core *mlxsw_core,
                          const struct devlink_trap_policer *policer,
                          u64 rate, u64 burst, struct netlink_ext_ack *extack);
int
mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
                                  const struct devlink_trap_policer *policer,
                                  u64 *p_drops);
int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
                                          bool *p_enabled, u16 *p_hw_id);

static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
{
        return mlxsw_core_net(mlxsw_sp->core);
}

/* spectrum_ethtool.c */
extern const struct ethtool_ops mlxsw_sp_port_ethtool_ops;
extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops;
extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops;

/* spectrum_policer.c */
extern const struct mlxsw_sp_policer_core_ops mlxsw_sp1_policer_core_ops;
extern const struct mlxsw_sp_policer_core_ops mlxsw_sp2_policer_core_ops;

enum mlxsw_sp_policer_type {
        MLXSW_SP_POLICER_TYPE_SINGLE_RATE,

        __MLXSW_SP_POLICER_TYPE_MAX,
        MLXSW_SP_POLICER_TYPE_MAX = __MLXSW_SP_POLICER_TYPE_MAX - 1,
};

struct mlxsw_sp_policer_params {
        u64 rate;
        u64 burst;
        bool bytes;
};

int mlxsw_sp_policer_add(struct mlxsw_sp *mlxsw_sp,
                         enum mlxsw_sp_policer_type type,
                         const struct mlxsw_sp_policer_params *params,
                         struct netlink_ext_ack *extack, u16 *p_policer_index);
void mlxsw_sp_policer_del(struct mlxsw_sp *mlxsw_sp,
                          enum mlxsw_sp_policer_type type,
                          u16 policer_index);
int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp,
                                       enum mlxsw_sp_policer_type type,
                                       u16 policer_index, u64 *p_drops);
int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);

#endif