Merge branch 'mlxsw-port-mirroring'
Jiri Pirko says: ==================== mlxsw: implement port mirroring offload This patchset introduces tc matchall classifier and its offload to Spectrum hardware. In combination with mirred action, defined port mirroring setup is offloaded by mlxsw/spectrum driver. The commands used for creating mirror ports: tc qdisc add dev eth25 handle ffff: ingress tc filter add dev eth25 parent ffff: \ matchall skip_sw \ action mirred egress mirror \ dev eth27 tc qdisc add dev eth25 handle 1: root prio tc filter add dev eth25 parent 1: \ matchall skip_sw \ action mirred egress mirror \ dev eth27 These patches contain: - Resource query implementation - Hardware port mirorring support for spectrum. - Definition of the matchall traffic classifier. - General support for hw-offloading for that classifier. - Specific spectrum implementaion for matchall offloading. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
bc0c419e0b
|
@ -105,6 +105,7 @@ enum mlxsw_cmd_opcode {
|
|||
MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
|
||||
MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
|
||||
MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
|
||||
MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101,
|
||||
};
|
||||
|
||||
static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
|
||||
|
@ -144,6 +145,8 @@ static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
|
|||
return "HW2SW_EQ";
|
||||
case MLXSW_CMD_OPCODE_QUERY_EQ:
|
||||
return "QUERY_EQ";
|
||||
case MLXSW_CMD_OPCODE_QUERY_RESOURCES:
|
||||
return "QUERY_RESOURCES";
|
||||
default:
|
||||
return "*UNKNOWN*";
|
||||
}
|
||||
|
@ -500,6 +503,35 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
|
|||
return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
|
||||
}
|
||||
|
||||
/* QUERY_RESOURCES - Query chip resources
|
||||
* --------------------------------------
|
||||
* OpMod == 0 (N/A) , INMmod is index
|
||||
* ----------------------------------
|
||||
* The QUERY_RESOURCES command retrieves information related to chip resources
|
||||
* by resource ID. Every command returns 32 entries. INmod is being use as base.
|
||||
* for example, index 1 will return entries 32-63. When the tables end and there
|
||||
* are no more sources in the table, will return resource id 0xFFF to indicate
|
||||
* it.
|
||||
*/
|
||||
static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core,
|
||||
char *out_mbox, int index)
|
||||
{
|
||||
return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES,
|
||||
0, index, false, out_mbox,
|
||||
MLXSW_CMD_MBOX_SIZE);
|
||||
}
|
||||
|
||||
/* cmd_mbox_query_resource_id
|
||||
* The resource id. 0xFFFF indicates table's end.
|
||||
*/
|
||||
MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false);
|
||||
|
||||
/* cmd_mbox_query_resource_data
|
||||
* The resource
|
||||
*/
|
||||
MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data,
|
||||
0x00, 0, 40, 0x8, 0, false);
|
||||
|
||||
/* CONFIG_PROFILE (Set) - Configure Switch Profile
|
||||
* ------------------------------
|
||||
* OpMod == 1 (Set), INMmod == 0 (N/A)
|
||||
|
|
|
@ -111,6 +111,7 @@ struct mlxsw_core {
|
|||
struct {
|
||||
u8 *mapping; /* lag_id+port_index to local_port mapping */
|
||||
} lag;
|
||||
struct mlxsw_resources resources;
|
||||
struct mlxsw_hwmon *hwmon;
|
||||
unsigned long driver_priv[0];
|
||||
/* driver_priv has to be always the last item */
|
||||
|
@ -1110,7 +1111,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
|
|||
}
|
||||
}
|
||||
|
||||
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
|
||||
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
|
||||
&mlxsw_core->resources);
|
||||
if (err)
|
||||
goto err_bus_init;
|
||||
|
||||
|
@ -1652,6 +1654,12 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
|
|||
}
|
||||
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
|
||||
|
||||
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
|
||||
{
|
||||
return &mlxsw_core->resources;
|
||||
}
|
||||
EXPORT_SYMBOL(mlxsw_core_resources_get);
|
||||
|
||||
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
|
||||
struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
|
||||
struct net_device *dev, bool split, u32 split_group)
|
||||
|
|
|
@ -215,6 +215,7 @@ struct mlxsw_config_profile {
|
|||
u32 kvd_linear_size;
|
||||
u32 kvd_hash_single_size;
|
||||
u32 kvd_hash_double_size;
|
||||
u8 resource_query_enable;
|
||||
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
|
||||
};
|
||||
|
||||
|
@ -266,10 +267,18 @@ struct mlxsw_driver {
|
|||
const struct mlxsw_config_profile *profile;
|
||||
};
|
||||
|
||||
struct mlxsw_resources {
|
||||
u8 max_span_valid:1;
|
||||
u8 max_span;
|
||||
};
|
||||
|
||||
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
|
||||
|
||||
struct mlxsw_bus {
|
||||
const char *kind;
|
||||
int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
|
||||
const struct mlxsw_config_profile *profile);
|
||||
const struct mlxsw_config_profile *profile,
|
||||
struct mlxsw_resources *resources);
|
||||
void (*fini)(void *bus_priv);
|
||||
bool (*skb_transmit_busy)(void *bus_priv,
|
||||
const struct mlxsw_tx_info *tx_info);
|
||||
|
|
|
@ -1154,6 +1154,61 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
|
|||
mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
|
||||
}
|
||||
|
||||
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
|
||||
#define MLXSW_MAX_SPAN_ID 0x2420
|
||||
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
|
||||
#define MLXSW_RESOURCES_PER_QUERY 32
|
||||
|
||||
static void mlxsw_pci_resources_query_parse(int id, u64 val,
|
||||
struct mlxsw_resources *resources)
|
||||
{
|
||||
switch (id) {
|
||||
case MLXSW_MAX_SPAN_ID:
|
||||
resources->max_span = val;
|
||||
resources->max_span_valid = 1;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
|
||||
struct mlxsw_resources *resources,
|
||||
u8 query_enabled)
|
||||
{
|
||||
int index, i;
|
||||
u64 data;
|
||||
u16 id;
|
||||
int err;
|
||||
|
||||
/* Not all the versions support resources query */
|
||||
if (!query_enabled)
|
||||
return 0;
|
||||
|
||||
mlxsw_cmd_mbox_zero(mbox);
|
||||
|
||||
for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) {
|
||||
err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) {
|
||||
id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
|
||||
data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
|
||||
|
||||
if (id == MLXSW_RESOURCES_TABLE_END_ID)
|
||||
return 0;
|
||||
|
||||
mlxsw_pci_resources_query_parse(id, data, resources);
|
||||
}
|
||||
}
|
||||
|
||||
/* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
|
||||
* MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
|
||||
*/
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
|
||||
const struct mlxsw_config_profile *profile)
|
||||
{
|
||||
|
@ -1404,7 +1459,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
|
|||
}
|
||||
|
||||
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
|
||||
const struct mlxsw_config_profile *profile)
|
||||
const struct mlxsw_config_profile *profile,
|
||||
struct mlxsw_resources *resources)
|
||||
{
|
||||
struct mlxsw_pci *mlxsw_pci = bus_priv;
|
||||
struct pci_dev *pdev = mlxsw_pci->pdev;
|
||||
|
@ -1463,6 +1519,11 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
|
|||
if (err)
|
||||
goto err_boardinfo;
|
||||
|
||||
err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources,
|
||||
profile->resource_query_enable);
|
||||
if (err)
|
||||
goto err_query_resources;
|
||||
|
||||
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
|
||||
if (err)
|
||||
goto err_config_profile;
|
||||
|
@ -1485,6 +1546,7 @@ err_request_eq_irq:
|
|||
mlxsw_pci_aqs_fini(mlxsw_pci);
|
||||
err_aqs_init:
|
||||
err_config_profile:
|
||||
err_query_resources:
|
||||
err_boardinfo:
|
||||
mlxsw_pci_fw_area_fini(mlxsw_pci);
|
||||
err_fw_area_init:
|
||||
|
|
|
@ -4633,6 +4633,123 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
|
|||
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
|
||||
}
|
||||
|
||||
/* MPAT - Monitoring Port Analyzer Table
|
||||
* -------------------------------------
|
||||
* MPAT Register is used to query and configure the Switch PortAnalyzer Table.
|
||||
* For an enabled analyzer, all fields except e (enable) cannot be modified.
|
||||
*/
|
||||
#define MLXSW_REG_MPAT_ID 0x901A
|
||||
#define MLXSW_REG_MPAT_LEN 0x78
|
||||
|
||||
static const struct mlxsw_reg_info mlxsw_reg_mpat = {
|
||||
.id = MLXSW_REG_MPAT_ID,
|
||||
.len = MLXSW_REG_MPAT_LEN,
|
||||
};
|
||||
|
||||
/* reg_mpat_pa_id
|
||||
* Port Analyzer ID.
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
|
||||
|
||||
/* reg_mpat_system_port
|
||||
* A unique port identifier for the final destination of the packet.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
|
||||
|
||||
/* reg_mpat_e
|
||||
* Enable. Indicating the Port Analyzer is enabled.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
|
||||
|
||||
/* reg_mpat_qos
|
||||
* Quality Of Service Mode.
|
||||
* 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation
|
||||
* PCP, DEI, DSCP or VL) are configured.
|
||||
* 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
|
||||
* same as in the original packet that has triggered the mirroring. For
|
||||
* SPAN also the pcp,dei are maintained.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
|
||||
|
||||
/* reg_mpat_be
|
||||
* Best effort mode. Indicates mirroring traffic should not cause packet
|
||||
* drop or back pressure, but will discard the mirrored packets. Mirrored
|
||||
* packets will be forwarded on a best effort manner.
|
||||
* 0: Do not discard mirrored packets
|
||||
* 1: Discard mirrored packets if causing congestion
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
|
||||
|
||||
static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
|
||||
u16 system_port, bool e)
|
||||
{
|
||||
MLXSW_REG_ZERO(mpat, payload);
|
||||
mlxsw_reg_mpat_pa_id_set(payload, pa_id);
|
||||
mlxsw_reg_mpat_system_port_set(payload, system_port);
|
||||
mlxsw_reg_mpat_e_set(payload, e);
|
||||
mlxsw_reg_mpat_qos_set(payload, 1);
|
||||
mlxsw_reg_mpat_be_set(payload, 1);
|
||||
}
|
||||
|
||||
/* MPAR - Monitoring Port Analyzer Register
|
||||
* ----------------------------------------
|
||||
* MPAR register is used to query and configure the port analyzer port mirroring
|
||||
* properties.
|
||||
*/
|
||||
#define MLXSW_REG_MPAR_ID 0x901B
|
||||
#define MLXSW_REG_MPAR_LEN 0x08
|
||||
|
||||
static const struct mlxsw_reg_info mlxsw_reg_mpar = {
|
||||
.id = MLXSW_REG_MPAR_ID,
|
||||
.len = MLXSW_REG_MPAR_LEN,
|
||||
};
|
||||
|
||||
/* reg_mpar_local_port
|
||||
* The local port to mirror the packets from.
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
|
||||
|
||||
enum mlxsw_reg_mpar_i_e {
|
||||
MLXSW_REG_MPAR_TYPE_EGRESS,
|
||||
MLXSW_REG_MPAR_TYPE_INGRESS,
|
||||
};
|
||||
|
||||
/* reg_mpar_i_e
|
||||
* Ingress/Egress
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4);
|
||||
|
||||
/* reg_mpar_enable
|
||||
* Enable mirroring
|
||||
* By default, port mirroring is disabled for all ports.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
|
||||
|
||||
/* reg_mpar_pa_id
|
||||
* Port Analyzer ID.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
|
||||
|
||||
static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
|
||||
enum mlxsw_reg_mpar_i_e i_e,
|
||||
bool enable, u8 pa_id)
|
||||
{
|
||||
MLXSW_REG_ZERO(mpar, payload);
|
||||
mlxsw_reg_mpar_local_port_set(payload, local_port);
|
||||
mlxsw_reg_mpar_enable_set(payload, enable);
|
||||
mlxsw_reg_mpar_i_e_set(payload, i_e);
|
||||
mlxsw_reg_mpar_pa_id_set(payload, pa_id);
|
||||
}
|
||||
|
||||
/* MLCR - Management LED Control Register
|
||||
* --------------------------------------
|
||||
* Controls the system LEDs.
|
||||
|
@ -5062,6 +5179,45 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
|
|||
mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
|
||||
}
|
||||
|
||||
/* SBIB - Shared Buffer Internal Buffer Register
|
||||
* ---------------------------------------------
|
||||
* The SBIB register configures per port buffers for internal use. The internal
|
||||
* buffers consume memory on the port buffers (note that the port buffers are
|
||||
* used also by PBMC).
|
||||
*
|
||||
* For Spectrum this is used for egress mirroring.
|
||||
*/
|
||||
#define MLXSW_REG_SBIB_ID 0xB006
|
||||
#define MLXSW_REG_SBIB_LEN 0x10
|
||||
|
||||
static const struct mlxsw_reg_info mlxsw_reg_sbib = {
|
||||
.id = MLXSW_REG_SBIB_ID,
|
||||
.len = MLXSW_REG_SBIB_LEN,
|
||||
};
|
||||
|
||||
/* reg_sbib_local_port
|
||||
* Local port number
|
||||
* Not supported for CPU port and router port
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
|
||||
|
||||
/* reg_sbib_buff_size
|
||||
* Units represented in cells
|
||||
* Allowed range is 0 to (cap_max_headroom_size - 1)
|
||||
* Default is 0
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
|
||||
|
||||
static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
|
||||
u32 buff_size)
|
||||
{
|
||||
MLXSW_REG_ZERO(sbib, payload);
|
||||
mlxsw_reg_sbib_local_port_set(payload, local_port);
|
||||
mlxsw_reg_sbib_buff_size_set(payload, buff_size);
|
||||
}
|
||||
|
||||
static inline const char *mlxsw_reg_id_str(u16 reg_id)
|
||||
{
|
||||
switch (reg_id) {
|
||||
|
@ -5165,6 +5321,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
|
|||
return "MFSM";
|
||||
case MLXSW_REG_MTCAP_ID:
|
||||
return "MTCAP";
|
||||
case MLXSW_REG_MPAT_ID:
|
||||
return "MPAT";
|
||||
case MLXSW_REG_MPAR_ID:
|
||||
return "MPAR";
|
||||
case MLXSW_REG_MTMP_ID:
|
||||
return "MTMP";
|
||||
case MLXSW_REG_MLCR_ID:
|
||||
|
@ -5179,6 +5339,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
|
|||
return "SBMM";
|
||||
case MLXSW_REG_SBSR_ID:
|
||||
return "SBSR";
|
||||
case MLXSW_REG_SBIB_ID:
|
||||
return "SBIB";
|
||||
default:
|
||||
return "*UNKNOWN*";
|
||||
}
|
||||
|
|
|
@ -54,6 +54,8 @@
|
|||
#include <linux/inetdevice.h>
|
||||
#include <net/switchdev.h>
|
||||
#include <generated/utsrelease.h>
|
||||
#include <net/pkt_cls.h>
|
||||
#include <net/tc_act/tc_mirred.h>
|
||||
|
||||
#include "spectrum.h"
|
||||
#include "core.h"
|
||||
|
@ -133,6 +135,8 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
|
|||
*/
|
||||
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
|
||||
|
||||
static bool mlxsw_sp_port_dev_check(const struct net_device *dev);
|
||||
|
||||
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
|
||||
const struct mlxsw_tx_info *tx_info)
|
||||
{
|
||||
|
@ -161,6 +165,303 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
|
||||
{
|
||||
struct mlxsw_resources *resources;
|
||||
int i;
|
||||
|
||||
resources = mlxsw_core_resources_get(mlxsw_sp->core);
|
||||
if (!resources->max_span_valid)
|
||||
return -EIO;
|
||||
|
||||
mlxsw_sp->span.entries_count = resources->max_span;
|
||||
mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
|
||||
sizeof(struct mlxsw_sp_span_entry),
|
||||
GFP_KERNEL);
|
||||
if (!mlxsw_sp->span.entries)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < mlxsw_sp->span.entries_count; i++)
|
||||
INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
|
||||
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
|
||||
|
||||
WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
|
||||
}
|
||||
kfree(mlxsw_sp->span.entries);
|
||||
}
|
||||
|
||||
static struct mlxsw_sp_span_entry *
|
||||
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
|
||||
struct mlxsw_sp_span_entry *span_entry;
|
||||
char mpat_pl[MLXSW_REG_MPAT_LEN];
|
||||
u8 local_port = port->local_port;
|
||||
int index;
|
||||
int i;
|
||||
int err;
|
||||
|
||||
/* find a free entry to use */
|
||||
index = -1;
|
||||
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
|
||||
if (!mlxsw_sp->span.entries[i].used) {
|
||||
index = i;
|
||||
span_entry = &mlxsw_sp->span.entries[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (index < 0)
|
||||
return NULL;
|
||||
|
||||
/* create a new port analayzer entry for local_port */
|
||||
mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
|
||||
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
|
||||
if (err)
|
||||
return NULL;
|
||||
|
||||
span_entry->used = true;
|
||||
span_entry->id = index;
|
||||
span_entry->ref_count = 0;
|
||||
span_entry->local_port = local_port;
|
||||
return span_entry;
|
||||
}
|
||||
|
||||
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_span_entry *span_entry)
|
||||
{
|
||||
u8 local_port = span_entry->local_port;
|
||||
char mpat_pl[MLXSW_REG_MPAT_LEN];
|
||||
int pa_id = span_entry->id;
|
||||
|
||||
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
|
||||
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
|
||||
span_entry->used = false;
|
||||
}
|
||||
|
||||
struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
|
||||
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
|
||||
|
||||
if (curr->used && curr->local_port == port->local_port)
|
||||
return curr;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
|
||||
{
|
||||
struct mlxsw_sp_span_entry *span_entry;
|
||||
|
||||
span_entry = mlxsw_sp_span_entry_find(port);
|
||||
if (span_entry) {
|
||||
span_entry->ref_count++;
|
||||
return span_entry;
|
||||
}
|
||||
|
||||
return mlxsw_sp_span_entry_create(port);
|
||||
}
|
||||
|
||||
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_span_entry *span_entry)
|
||||
{
|
||||
if (--span_entry->ref_count == 0)
|
||||
mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
|
||||
struct mlxsw_sp_span_inspected_port *p;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
|
||||
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
|
||||
|
||||
list_for_each_entry(p, &curr->bound_ports_list, list)
|
||||
if (p->local_port == port->local_port &&
|
||||
p->type == MLXSW_SP_SPAN_EGRESS)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
|
||||
{
|
||||
return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
|
||||
}
|
||||
|
||||
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
|
||||
char sbib_pl[MLXSW_REG_SBIB_LEN];
|
||||
int err;
|
||||
|
||||
/* If port is egress mirrored, the shared buffer size should be
|
||||
* updated according to the mtu value
|
||||
*/
|
||||
if (mlxsw_sp_span_is_egress_mirror(port)) {
|
||||
mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
|
||||
mlxsw_sp_span_mtu_to_buffsize(mtu));
|
||||
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
|
||||
if (err) {
|
||||
netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct mlxsw_sp_span_inspected_port *
|
||||
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
|
||||
struct mlxsw_sp_span_entry *span_entry)
|
||||
{
|
||||
struct mlxsw_sp_span_inspected_port *p;
|
||||
|
||||
list_for_each_entry(p, &span_entry->bound_ports_list, list)
|
||||
if (port->local_port == p->local_port)
|
||||
return p;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int
|
||||
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
|
||||
struct mlxsw_sp_span_entry *span_entry,
|
||||
enum mlxsw_sp_span_type type)
|
||||
{
|
||||
struct mlxsw_sp_span_inspected_port *inspected_port;
|
||||
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
|
||||
char mpar_pl[MLXSW_REG_MPAR_LEN];
|
||||
char sbib_pl[MLXSW_REG_SBIB_LEN];
|
||||
int pa_id = span_entry->id;
|
||||
int err;
|
||||
|
||||
/* if it is an egress SPAN, bind a shared buffer to it */
|
||||
if (type == MLXSW_SP_SPAN_EGRESS) {
|
||||
mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
|
||||
mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
|
||||
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
|
||||
if (err) {
|
||||
netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
/* bind the port to the SPAN entry */
|
||||
mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
|
||||
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
|
||||
if (err)
|
||||
goto err_mpar_reg_write;
|
||||
|
||||
inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
|
||||
if (!inspected_port) {
|
||||
err = -ENOMEM;
|
||||
goto err_inspected_port_alloc;
|
||||
}
|
||||
inspected_port->local_port = port->local_port;
|
||||
inspected_port->type = type;
|
||||
list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
|
||||
|
||||
return 0;
|
||||
|
||||
err_mpar_reg_write:
|
||||
err_inspected_port_alloc:
|
||||
if (type == MLXSW_SP_SPAN_EGRESS) {
|
||||
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
|
||||
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static void
|
||||
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
|
||||
struct mlxsw_sp_span_entry *span_entry,
|
||||
enum mlxsw_sp_span_type type)
|
||||
{
|
||||
struct mlxsw_sp_span_inspected_port *inspected_port;
|
||||
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
|
||||
char mpar_pl[MLXSW_REG_MPAR_LEN];
|
||||
char sbib_pl[MLXSW_REG_SBIB_LEN];
|
||||
int pa_id = span_entry->id;
|
||||
|
||||
inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
|
||||
if (!inspected_port)
|
||||
return;
|
||||
|
||||
/* remove the inspected port */
|
||||
mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
|
||||
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
|
||||
|
||||
/* remove the SBIB buffer if it was egress SPAN */
|
||||
if (type == MLXSW_SP_SPAN_EGRESS) {
|
||||
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
|
||||
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
|
||||
}
|
||||
|
||||
mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
|
||||
|
||||
list_del(&inspected_port->list);
|
||||
kfree(inspected_port);
|
||||
}
|
||||
|
||||
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
|
||||
struct mlxsw_sp_port *to,
|
||||
enum mlxsw_sp_span_type type)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
|
||||
struct mlxsw_sp_span_entry *span_entry;
|
||||
int err;
|
||||
|
||||
span_entry = mlxsw_sp_span_entry_get(to);
|
||||
if (!span_entry)
|
||||
return -ENOENT;
|
||||
|
||||
netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
|
||||
span_entry->id);
|
||||
|
||||
err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
|
||||
if (err)
|
||||
goto err_port_bind;
|
||||
|
||||
return 0;
|
||||
|
||||
err_port_bind:
|
||||
mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
|
||||
struct mlxsw_sp_port *to,
|
||||
enum mlxsw_sp_span_type type)
|
||||
{
|
||||
struct mlxsw_sp_span_entry *span_entry;
|
||||
|
||||
span_entry = mlxsw_sp_span_entry_find(to);
|
||||
if (!span_entry) {
|
||||
netdev_err(from->dev, "no span entry found\n");
|
||||
return;
|
||||
}
|
||||
|
||||
netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
|
||||
span_entry->id);
|
||||
mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
|
||||
}
|
||||
|
||||
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
bool is_up)
|
||||
{
|
||||
|
@ -493,6 +794,9 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
|
|||
err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
|
||||
if (err)
|
||||
goto err_span_port_mtu_update;
|
||||
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
|
||||
if (err)
|
||||
goto err_port_mtu_set;
|
||||
|
@ -500,6 +804,8 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
|
|||
return 0;
|
||||
|
||||
err_port_mtu_set:
|
||||
mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
|
||||
err_span_port_mtu_update:
|
||||
mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
|
||||
return err;
|
||||
}
|
||||
|
@ -776,10 +1082,155 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct mlxsw_sp_port_mall_tc_entry *
|
||||
mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
|
||||
unsigned long cookie) {
|
||||
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
|
||||
|
||||
list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
|
||||
if (mall_tc_entry->cookie == cookie)
|
||||
return mall_tc_entry;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int
|
||||
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
struct tc_cls_matchall_offload *cls,
|
||||
const struct tc_action *a,
|
||||
bool ingress)
|
||||
{
|
||||
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
|
||||
struct net *net = dev_net(mlxsw_sp_port->dev);
|
||||
enum mlxsw_sp_span_type span_type;
|
||||
struct mlxsw_sp_port *to_port;
|
||||
struct net_device *to_dev;
|
||||
int ifindex;
|
||||
int err;
|
||||
|
||||
ifindex = tcf_mirred_ifindex(a);
|
||||
to_dev = __dev_get_by_index(net, ifindex);
|
||||
if (!to_dev) {
|
||||
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!mlxsw_sp_port_dev_check(to_dev)) {
|
||||
netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
to_port = netdev_priv(to_dev);
|
||||
|
||||
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
|
||||
if (!mall_tc_entry)
|
||||
return -ENOMEM;
|
||||
|
||||
mall_tc_entry->cookie = cls->cookie;
|
||||
mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
|
||||
mall_tc_entry->mirror.to_local_port = to_port->local_port;
|
||||
mall_tc_entry->mirror.ingress = ingress;
|
||||
list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
|
||||
|
||||
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
|
||||
err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
|
||||
if (err)
|
||||
goto err_mirror_add;
|
||||
return 0;
|
||||
|
||||
err_mirror_add:
|
||||
list_del(&mall_tc_entry->list);
|
||||
kfree(mall_tc_entry);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
__be16 protocol,
|
||||
struct tc_cls_matchall_offload *cls,
|
||||
bool ingress)
|
||||
{
|
||||
struct tcf_exts *exts = cls->exts;
|
||||
const struct tc_action *a;
|
||||
int err;
|
||||
|
||||
if (!list_is_singular(&exts->actions)) {
|
||||
netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
a = list_first_entry(&exts->actions, struct tc_action, list);
|
||||
if (is_tcf_mirred_mirror(a) && protocol == htons(ETH_P_ALL)) {
|
||||
err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
|
||||
a, ingress);
|
||||
if (err)
|
||||
return err;
|
||||
} else {
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
struct tc_cls_matchall_offload *cls)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
|
||||
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
|
||||
enum mlxsw_sp_span_type span_type;
|
||||
struct mlxsw_sp_port *to_port;
|
||||
|
||||
mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
|
||||
cls->cookie);
|
||||
if (!mall_tc_entry) {
|
||||
netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
|
||||
return;
|
||||
}
|
||||
|
||||
switch (mall_tc_entry->type) {
|
||||
case MLXSW_SP_PORT_MALL_MIRROR:
|
||||
to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
|
||||
span_type = mall_tc_entry->mirror.ingress ?
|
||||
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
|
||||
|
||||
mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
list_del(&mall_tc_entry->list);
|
||||
kfree(mall_tc_entry);
|
||||
}
|
||||
|
||||
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
|
||||
__be16 proto, struct tc_to_netdev *tc)
|
||||
{
|
||||
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
|
||||
bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
|
||||
|
||||
if (tc->type == TC_SETUP_MATCHALL) {
|
||||
switch (tc->cls_mall->command) {
|
||||
case TC_CLSMATCHALL_REPLACE:
|
||||
return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
|
||||
proto,
|
||||
tc->cls_mall,
|
||||
ingress);
|
||||
case TC_CLSMATCHALL_DESTROY:
|
||||
mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
|
||||
tc->cls_mall);
|
||||
return 0;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
|
||||
.ndo_open = mlxsw_sp_port_open,
|
||||
.ndo_stop = mlxsw_sp_port_stop,
|
||||
.ndo_start_xmit = mlxsw_sp_port_xmit,
|
||||
.ndo_setup_tc = mlxsw_sp_setup_tc,
|
||||
.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
|
||||
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
|
||||
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
|
||||
|
@ -1657,6 +2108,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|||
goto err_port_untagged_vlans_alloc;
|
||||
}
|
||||
INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
|
||||
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
|
||||
|
||||
mlxsw_sp_port->pcpu_stats =
|
||||
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
|
||||
|
@ -1678,7 +2130,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|||
netif_carrier_off(dev);
|
||||
|
||||
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
|
||||
dev->hw_features |= NETIF_F_HW_TC;
|
||||
|
||||
/* Each packet needs to have a Tx header (metadata) on top all other
|
||||
* headers.
|
||||
|
@ -2410,6 +2863,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
|
|||
goto err_router_init;
|
||||
}
|
||||
|
||||
err = mlxsw_sp_span_init(mlxsw_sp);
|
||||
if (err) {
|
||||
dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
|
||||
goto err_span_init;
|
||||
}
|
||||
|
||||
err = mlxsw_sp_ports_create(mlxsw_sp);
|
||||
if (err) {
|
||||
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
|
||||
|
@ -2419,6 +2878,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
|
|||
return 0;
|
||||
|
||||
err_ports_create:
|
||||
mlxsw_sp_span_fini(mlxsw_sp);
|
||||
err_span_init:
|
||||
mlxsw_sp_router_fini(mlxsw_sp);
|
||||
err_router_init:
|
||||
mlxsw_sp_switchdev_fini(mlxsw_sp);
|
||||
|
@ -2439,6 +2900,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
|
|||
int i;
|
||||
|
||||
mlxsw_sp_ports_remove(mlxsw_sp);
|
||||
mlxsw_sp_span_fini(mlxsw_sp);
|
||||
mlxsw_sp_router_fini(mlxsw_sp);
|
||||
mlxsw_sp_switchdev_fini(mlxsw_sp);
|
||||
mlxsw_sp_buffers_fini(mlxsw_sp);
|
||||
|
@ -2488,6 +2950,7 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
|
|||
.type = MLXSW_PORT_SWID_TYPE_ETH,
|
||||
}
|
||||
},
|
||||
.resource_query_enable = 1,
|
||||
};
|
||||
|
||||
static struct mlxsw_driver mlxsw_sp_driver = {
|
||||
|
|
|
@ -214,6 +214,43 @@ struct mlxsw_sp_vr {
|
|||
struct mlxsw_sp_fib *fib;
|
||||
};
|
||||
|
||||
/* Direction of a SPAN (port mirroring) binding. */
enum mlxsw_sp_span_type {
	MLXSW_SP_SPAN_EGRESS,
	MLXSW_SP_SPAN_INGRESS
};

/* A port bound to a SPAN entry as a traffic source. */
struct mlxsw_sp_span_inspected_port {
	struct list_head list;
	enum mlxsw_sp_span_type type;	/* direction being mirrored */
	u8 local_port;
};

/* Hardware SPAN (analyzer) entry: one mirror destination plus the list
 * of source ports currently bound to it, refcounted across bindings.
 */
struct mlxsw_sp_span_entry {
	u8 local_port;			/* presumably the mirror-to port — confirm against span code */
	bool used;
	struct list_head bound_ports_list;
	int ref_count;
	int id;				/* hardware SPAN entry id */
};

/* Kinds of offloaded matchall actions supported per port. */
enum mlxsw_sp_port_mall_action_type {
	MLXSW_SP_PORT_MALL_MIRROR,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
	u8 to_local_port;		/* local port traffic is mirrored to */
	bool ingress;			/* mirror received rather than sent traffic */
};

/* One offloaded matchall filter on a port, keyed by the tc cookie. */
struct mlxsw_sp_port_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;		/* tc filter identity (see tc_cls_matchall_offload) */
	enum mlxsw_sp_port_mall_action_type type;
	union {
		struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
	};
};
|
||||
|
||||
struct mlxsw_sp_router {
|
||||
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
|
||||
struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
|
||||
|
@ -260,6 +297,11 @@ struct mlxsw_sp {
|
|||
struct {
|
||||
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
|
||||
} kvdl;
|
||||
|
||||
struct {
|
||||
struct mlxsw_sp_span_entry *entries;
|
||||
int entries_count;
|
||||
} span;
|
||||
};
|
||||
|
||||
static inline struct mlxsw_sp_upper *
|
||||
|
@ -316,6 +358,8 @@ struct mlxsw_sp_port {
|
|||
unsigned long *untagged_vlans;
|
||||
/* VLAN interfaces */
|
||||
struct list_head vports_list;
|
||||
/* TC handles */
|
||||
struct list_head mall_tc_list;
|
||||
};
|
||||
|
||||
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
|
||||
|
|
|
@ -1541,6 +1541,7 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = {
|
|||
.type = MLXSW_PORT_SWID_TYPE_ETH,
|
||||
}
|
||||
},
|
||||
.resource_query_enable = 0,
|
||||
};
|
||||
|
||||
static struct mlxsw_driver mlxsw_sx_driver = {
|
||||
|
|
|
@ -787,6 +787,7 @@ enum {
|
|||
TC_SETUP_MQPRIO,
|
||||
TC_SETUP_CLSU32,
|
||||
TC_SETUP_CLSFLOWER,
|
||||
TC_SETUP_MATCHALL,
|
||||
};
|
||||
|
||||
struct tc_cls_u32_offload;
|
||||
|
@ -797,6 +798,7 @@ struct tc_to_netdev {
|
|||
u8 tc;
|
||||
struct tc_cls_u32_offload *cls_u32;
|
||||
struct tc_cls_flower_offload *cls_flower;
|
||||
struct tc_cls_matchall_offload *cls_mall;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -442,4 +442,15 @@ struct tc_cls_flower_offload {
|
|||
struct tcf_exts *exts;
|
||||
};
|
||||
|
||||
/* Commands passed to drivers via ndo_setup_tc() for matchall offload. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,		/* install the filter in hardware */
	TC_CLSMATCHALL_DESTROY,		/* remove the filter from hardware */
};

struct tc_cls_matchall_offload {
	enum tc_matchall_command command;
	struct tcf_exts *exts;		/* actions to offload; NULL on destroy */
	unsigned long cookie;		/* opaque filter identity for the driver */
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -24,6 +24,15 @@ static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
|
|||
return false;
|
||||
}
|
||||
|
||||
/* True if the action is a mirred egress-mirror action.  Drivers use this
 * to decide whether a filter can be offloaded as port mirroring.  Always
 * false when CONFIG_NET_CLS_ACT is disabled.
 */
static inline bool is_tcf_mirred_mirror(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
	if (a->ops && a->ops->type == TCA_ACT_MIRRED)
		return to_mirred(a)->tcfm_eaction == TCA_EGRESS_MIRROR;
#endif
	return false;
}
|
||||
|
||||
static inline int tcf_mirred_ifindex(const struct tc_action *a)
|
||||
{
|
||||
return to_mirred(a)->tcfm_ifindex;
|
||||
|
|
|
@ -433,6 +433,18 @@ enum {
|
|||
|
||||
#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
|
||||
|
||||
/* Match-all classifier */
|
||||
|
||||
enum {
	TCA_MATCHALL_UNSPEC,
	TCA_MATCHALL_CLASSID,	/* u32: classid matching packets map to */
	TCA_MATCHALL_ACT,	/* nested: actions attached to the filter */
	TCA_MATCHALL_FLAGS,	/* u32: TCA_CLS_FLAGS_* offload flags */
	__TCA_MATCHALL_MAX,
};

#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
|
||||
|
||||
/* Extended Matches */
|
||||
|
||||
struct tcf_ematch_tree_hdr {
|
||||
|
|
|
@ -494,6 +494,16 @@ config NET_CLS_FLOWER
|
|||
To compile this code as a module, choose M here: the module will
|
||||
be called cls_flower.
|
||||
|
||||
config NET_CLS_MATCHALL
|
||||
tristate "Match-all classifier"
|
||||
select NET_CLS
|
||||
---help---
|
||||
If you say Y here, you will be able to classify packets based on
|
||||
nothing. Every packet will match.
|
||||
|
||||
To compile this code as a module, choose M here: the module will
|
||||
be called cls_matchall.
|
||||
|
||||
config NET_EMATCH
|
||||
bool "Extended Matches"
|
||||
select NET_CLS
|
||||
|
|
|
@ -60,6 +60,7 @@ obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o
|
|||
obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o
|
||||
obj-$(CONFIG_NET_CLS_BPF) += cls_bpf.o
|
||||
obj-$(CONFIG_NET_CLS_FLOWER) += cls_flower.o
|
||||
obj-$(CONFIG_NET_CLS_MATCHALL) += cls_matchall.o
|
||||
obj-$(CONFIG_NET_EMATCH) += ematch.o
|
||||
obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o
|
||||
obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
|
||||
|
|
|
@ -0,0 +1,318 @@
|
|||
/*
|
||||
* net/sched/cls_matchll.c Match-all classifier
|
||||
*
|
||||
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <net/sch_generic.h>
|
||||
#include <net/pkt_cls.h>
|
||||
|
||||
/* A single matchall filter; the classifier supports at most one per tp. */
struct cls_mall_filter {
	struct tcf_exts exts;	/* attached actions */
	struct tcf_result res;	/* classification result (classid binding) */
	u32 handle;
	struct rcu_head rcu;	/* for deferred free via call_rcu() */
	u32 flags;		/* TCA_CLS_FLAGS_* (skip_sw / skip_hw) */
};

/* Per-tp root object; holds the single installed filter, if any. */
struct cls_mall_head {
	struct cls_mall_filter *filter;
	struct rcu_head rcu;
};
|
||||
|
||||
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
struct tcf_result *res)
|
||||
{
|
||||
struct cls_mall_head *head = rcu_dereference_bh(tp->root);
|
||||
struct cls_mall_filter *f = head->filter;
|
||||
|
||||
if (tc_skip_sw(f->flags))
|
||||
return -1;
|
||||
|
||||
return tcf_exts_exec(skb, &f->exts, res);
|
||||
}
|
||||
|
||||
static int mall_init(struct tcf_proto *tp)
|
||||
{
|
||||
struct cls_mall_head *head;
|
||||
|
||||
head = kzalloc(sizeof(*head), GFP_KERNEL);
|
||||
if (!head)
|
||||
return -ENOBUFS;
|
||||
|
||||
rcu_assign_pointer(tp->root, head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mall_destroy_filter(struct rcu_head *head)
|
||||
{
|
||||
struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
|
||||
|
||||
tcf_exts_destroy(&f->exts);
|
||||
|
||||
kfree(f);
|
||||
}
|
||||
|
||||
static int mall_replace_hw_filter(struct tcf_proto *tp,
|
||||
struct cls_mall_filter *f,
|
||||
unsigned long cookie)
|
||||
{
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
struct tc_to_netdev offload;
|
||||
struct tc_cls_matchall_offload mall_offload = {0};
|
||||
|
||||
offload.type = TC_SETUP_MATCHALL;
|
||||
offload.cls_mall = &mall_offload;
|
||||
offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
|
||||
offload.cls_mall->exts = &f->exts;
|
||||
offload.cls_mall->cookie = cookie;
|
||||
|
||||
return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
|
||||
&offload);
|
||||
}
|
||||
|
||||
static void mall_destroy_hw_filter(struct tcf_proto *tp,
|
||||
struct cls_mall_filter *f,
|
||||
unsigned long cookie)
|
||||
{
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
struct tc_to_netdev offload;
|
||||
struct tc_cls_matchall_offload mall_offload = {0};
|
||||
|
||||
offload.type = TC_SETUP_MATCHALL;
|
||||
offload.cls_mall = &mall_offload;
|
||||
offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
|
||||
offload.cls_mall->exts = NULL;
|
||||
offload.cls_mall->cookie = cookie;
|
||||
|
||||
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
|
||||
&offload);
|
||||
}
|
||||
|
||||
/* Destroy the whole tp instance.  If a filter is still installed and
 * force is false, refuse.  Otherwise tear down any hardware offload,
 * defer the filter free past an RCU grace period, and drop the head.
 */
static bool mall_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_mall_filter *f = head->filter;

	if (!force && f)
		return false;

	if (f) {
		if (tc_should_offload(dev, tp, f->flags))
			mall_destroy_hw_filter(tp, f, (unsigned long) f);

		/* Concurrent classify may still hold f; defer the free. */
		call_rcu(&f->rcu, mall_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}
|
||||
|
||||
/* Look up the filter by handle; 0 when absent or the handle differs. */
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = head->filter;

	return (f && f->handle == handle) ? (unsigned long) f : 0;
}
|
||||
|
||||
/* Netlink attribute policy for TCA_MATCHALL_* options. */
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID] = { .type = NLA_U32 },
};
|
||||
|
||||
/* Parse filter parameters: validate the actions into a temporary exts,
 * bind an optional classid, then commit the exts into the filter.
 */
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_filter *f,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	struct tcf_exts new_exts;
	int ret;

	tcf_exts_init(&new_exts, TCA_MATCHALL_ACT, 0);
	ret = tcf_exts_validate(net, tp, tb, est, &new_exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_MATCHALL_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &new_exts);
	return 0;
}
|
||||
|
||||
static int mall_change(struct net *net, struct sk_buff *in_skb,
|
||||
struct tcf_proto *tp, unsigned long base,
|
||||
u32 handle, struct nlattr **tca,
|
||||
unsigned long *arg, bool ovr)
|
||||
{
|
||||
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
||||
struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
struct cls_mall_filter *f;
|
||||
struct nlattr *tb[TCA_MATCHALL_MAX + 1];
|
||||
u32 flags = 0;
|
||||
int err;
|
||||
|
||||
if (!tca[TCA_OPTIONS])
|
||||
return -EINVAL;
|
||||
|
||||
if (head->filter)
|
||||
return -EBUSY;
|
||||
|
||||
if (fold)
|
||||
return -EINVAL;
|
||||
|
||||
err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
|
||||
tca[TCA_OPTIONS], mall_policy);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (tb[TCA_MATCHALL_FLAGS]) {
|
||||
flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
|
||||
if (!tc_flags_valid(flags))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
f = kzalloc(sizeof(*f), GFP_KERNEL);
|
||||
if (!f)
|
||||
return -ENOBUFS;
|
||||
|
||||
tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
|
||||
|
||||
if (!handle)
|
||||
handle = 1;
|
||||
f->handle = handle;
|
||||
f->flags = flags;
|
||||
|
||||
err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
|
||||
if (err)
|
||||
goto errout;
|
||||
|
||||
if (tc_should_offload(dev, tp, flags)) {
|
||||
err = mall_replace_hw_filter(tp, f, (unsigned long) f);
|
||||
if (err) {
|
||||
if (tc_skip_sw(flags))
|
||||
goto errout;
|
||||
else
|
||||
err = 0;
|
||||
}
|
||||
}
|
||||
|
||||
*arg = (unsigned long) f;
|
||||
rcu_assign_pointer(head->filter, f);
|
||||
|
||||
return 0;
|
||||
|
||||
errout:
|
||||
kfree(f);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Remove the installed filter: undo the hardware offload, unlink it from
 * the head, unbind its class, and free it after an RCU grace period.
 */
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
	struct net_device *dev = tp->q->dev_queue->dev;

	if (tc_should_offload(dev, tp, f->flags))
		mall_destroy_hw_filter(tp, f, (unsigned long) f);

	RCU_INIT_POINTER(head->filter, NULL);
	tcf_unbind_filter(tp, &f->res);
	/* Concurrent classify may still hold f; defer the free. */
	call_rcu(&f->rcu, mall_destroy_filter);
	return 0;
}
|
||||
|
||||
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
|
||||
{
|
||||
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
||||
struct cls_mall_filter *f = head->filter;
|
||||
|
||||
if (arg->count < arg->skip)
|
||||
goto skip;
|
||||
if (arg->fn(tp, (unsigned long) f, arg) < 0)
|
||||
arg->stop = 1;
|
||||
skip:
|
||||
arg->count++;
|
||||
}
|
||||
|
||||
/* Dump one filter into a netlink message.  Returns skb->len on success,
 * -1 on buffer exhaustion (with the partial options nest cancelled).
 */
static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
	struct nlattr *nest;

	/* fh may be 0 when no filter is installed (see mall_walk). */
	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	/* Action stats go outside the options nest. */
	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
|
||||
|
||||
/* Classifier operations table registered with the tc core. */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind = "matchall",
	.classify = mall_classify,
	.init = mall_init,
	.destroy = mall_destroy,
	.get = mall_get,
	.change = mall_change,
	.delete = mall_delete,
	.walk = mall_walk,
	.dump = mall_dump,
	.owner = THIS_MODULE,
};

/* Module entry point: register the "matchall" classifier. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
|
Loading…
Reference in New Issue