// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "ib.h"

static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;

struct mlxsw_sx {
	struct mlxsw_sx_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];
};

struct mlxsw_sx_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;
	u8 local_port;
	struct {
		u8 module;
	} mapping;
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
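
/* Prepend the Tx header to an outgoing skb and fill it in. All frames sent
 * by this driver are control packets directed at a specific egress port;
 * only the EMAD-related fields differ between EMADs and other traffic.
 */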
static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	bool is_emad = tx_info->is_emad;

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	/* We currently set default values for the egress tclass (QoS). */
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
						  MLXSW_TXHDR_ETCLASS_5);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
					      MLXSW_TXHDR_RDQ_OTHER);
	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
					       MLXSW_TXHDR_NOT_EMAD);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
					  bool is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
				    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
					 bool *p_is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}
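
/* Query the maximum MTU the device supports via PMTU and reject anything
 * larger before programming the new MTU.
 */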
static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
				   u16 mtu)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u16 mtu)
{
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    u16 mtu)
{
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u8 ib_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
	int err;

	mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
	return err;
}

static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
}

static int
mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	return 0;
}

static int mlxsw_sx_port_open(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sx_port_stop(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
}
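
/* Transmit path: reserve headroom for the Tx header, bail out if the core
 * transmit queue is busy, then build the header and hand the skb to the core.
 */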
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}
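
/* Sum the per-CPU packet and byte counters, reading each CPU's values under
 * its u64_stats sequence counter.
 */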
static void
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
}

static struct devlink_port *
mlxsw_sx_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
						mlxsw_sx_port->local_port);
}

static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open = mlxsw_sx_port_open,
	.ndo_stop = mlxsw_sx_port_stop,
	.ndo_start_xmit = mlxsw_sx_port_xmit,
	.ndo_change_mtu = mlxsw_sx_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
	.ndo_get_devlink_port = mlxsw_sx_port_get_devlink_port,
};

static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)

static void mlxsw_sx_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SX_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
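
/* Mapping between PTYS Ethernet protocol bits and the legacy ethtool
 * supported/advertised bitmasks and speeds. Entries without an ethtool
 * mode only contribute their speed.
 */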
struct mlxsw_sx_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */

static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
			speed = mlxsw_sx_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int
mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	u32 supported, advertising, lp_advertising;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
				  &eth_proto_admin, &eth_proto_oper);

	supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
		    mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
		    SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
	lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	return 0;
}

static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sx_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sx_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}
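
/* Build a PTYS mask of every link mode whose speed does not exceed
 * upper_speed (the base speed times the port's lane width).
 */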
static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}
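
/* Translate the requested ethtool settings into a PTYS admin mask, limit it
 * to what the port can do, and if the port is operationally up toggle its
 * admin state so the new configuration takes effect.
 */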
static int
mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 advertising;
	bool is_up;
	int err;

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_new, true);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sx_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlxsw_sx_port_get_strings,
	.get_ethtool_stats = mlxsw_sx_port_get_stats,
	.get_sset_count = mlxsw_sx_port_get_sset_count,
	.get_link_ksettings = mlxsw_sx_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
};

static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
	return 0;
}
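
/* Derive the port MAC address from the switch base MAC (PPAD) plus the
 * local port number.
 */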
static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in base mac address is guaranteed
	 * to be such it does not overflow when adding local_port
	 * value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}

static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
				      u16 speed, u16 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];

	mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
			       width);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_admin, true);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    enum mlxsw_reg_spmlr_learn_mode mode)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char spmlr_pl[MLXSW_REG_SPMLR_LEN];

	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
}
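
/* Allocate and register an Ethernet netdev for a front panel port: set up
 * per-CPU stats, MAC address, SWID 0, speed, MTU, STP state and MAC
 * learning, then register the netdev and expose the port to the core.
 */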
static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				      u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
	dev_net_set(dev, mlxsw_core_net(mlxsw_sx->core));
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
				mlxsw_sx_port, dev);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}

static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				    u8 module, u8 width)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
				   module + 1, false, 0,
				   mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
	if (err)
		goto err_port_create;

	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
	return err;
}

static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	free_netdev(mlxsw_sx_port->dev);
}

static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	return mlxsw_sx->ports[local_port] != NULL;
}
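
/* Create an InfiniBand port: no netdev is allocated, the port is moved to
 * the InfiniBand switch partition (SWID 1) and configured with its IB port
 * number, supported speeds and widths, and maximum MTU.
 */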
static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				     u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	int err;

	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
	if (!mlxsw_sx_port)
		return -ENOMEM;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* Adding port to Infiniband swid (1) */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	/* Expose the IB port number as its front panel name */
	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
			mlxsw_sx_port->local_port);
		goto err_port_ib_set;
	}

	/* Supports all speeds from SDR to FDR (bitmask) and support bus width
	 * of 1x, 2x and 4x (3 bits bitmask)
	 */
	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
					 BIT(3) - 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	/* Change to the maximum MTU the device supports, the SMA will take
	 * care of the active MTU
	 */
	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
			mlxsw_sx_port->local_port);
		goto err_port_admin_set;
	}

	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
			       mlxsw_sx_port);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_port_admin_set:
err_port_mtu_set:
err_port_speed_set:
err_port_ib_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
	kfree(mlxsw_sx_port);
	return err;
}

static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	kfree(mlxsw_sx_port);
}

static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	enum devlink_port_type port_type =
		mlxsw_core_port_type_get(mlxsw_sx->core, local_port);

	if (port_type == DEVLINK_PORT_TYPE_ETH)
		__mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		__mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
}

static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
}

static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
	mlxsw_sx->ports = NULL;
}

static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
	size_t alloc_size;
	u8 module, width;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sx->ports)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
						    &width);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
	mlxsw_sx->ports = NULL;
	return err;
}

static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					 enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sx_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sx_port->dev);
	} else {
		netdev_info(mlxsw_sx_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sx_port->dev);
	}
}

static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP)
		pr_info("ib link for port %d - up\n",
			mlxsw_sx_port->mapping.module + 1);
	else
		pr_info("ib link for port %d - down\n",
			mlxsw_sx_port->mapping.module + 1);
}

static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port;
	enum mlxsw_reg_pude_oper_status status;
	enum devlink_port_type port_type;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sx_port = mlxsw_sx->ports[local_port];
	if (!mlxsw_sx_port) {
		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
	if (port_type == DEVLINK_PORT_TYPE_ETH)
		mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
}

static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
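
/* devlink port type change: tear down the existing port representation and
 * re-create it as an Ethernet or InfiniBand port on the matching switch
 * partition. DEVLINK_PORT_TYPE_AUTO is not supported.
 */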
static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
				  enum devlink_port_type new_type)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	u8 module, width;
	int err;

	if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
		dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (new_type == DEVLINK_PORT_TYPE_AUTO)
		return -EOPNOTSUPP;

	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
					    &width);
	if (err)
		goto err_port_module_info_get;

	if (new_type == DEVLINK_PORT_TYPE_ETH)
		err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
						 width);
	else if (new_type == DEVLINK_PORT_TYPE_IB)
		err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
						width);

err_port_module_info_get:
	return err;
}
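
/* All Rx traps below share the same listener and the SX2_RX trap group. */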
|
|
|
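/* All packet traps below are handled by mlxsw_sx_rx_listener_func() and
 * use the SwitchX-2 RX trap group; MLXSW_SX_RXL() wraps MLXSW_RXL() with
 * these common arguments.
 */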
#define MLXSW_SX_RXL(_trap_id) \
	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU,	\
		  false, SX2_RX, FORWARD)

static const struct mlxsw_listener mlxsw_sx_listener[] = {
	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
	MLXSW_SX_RXL(FDB_MC),
	MLXSW_SX_RXL(STP),
	MLXSW_SX_RXL(LACP),
	MLXSW_SX_RXL(EAPOL),
	MLXSW_SX_RXL(LLDP),
	MLXSW_SX_RXL(MMRP),
	MLXSW_SX_RXL(MVRP),
	MLXSW_SX_RXL(RPVST),
	MLXSW_SX_RXL(DHCP),
	MLXSW_SX_RXL(IGMP_QUERY),
	MLXSW_SX_RXL(IGMP_V1_REPORT),
	MLXSW_SX_RXL(IGMP_V2_REPORT),
	MLXSW_SX_RXL(IGMP_V2_LEAVE),
	MLXSW_SX_RXL(IGMP_V3_REPORT),
};

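/* Configure the SwitchX-2 RX and CTRL trap groups (no policer, default
 * priority and TC, each mapped to its own receive descriptor queue) and
 * register every listener in mlxsw_sx_listener[]; on failure, unregister
 * whatever was already registered.
 */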
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sx->core,
					       &mlxsw_sx_listener[i],
					       mlxsw_sx);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sx->core,
					   &mlxsw_sx_listener[i],
					   mlxsw_sx);
	}
	return err;
}

static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sx->core,
					   &mlxsw_sx_listener[i],
					   mlxsw_sx);
	}
}

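/* Flooding setup: build a single flooding table that contains only the
 * CPU port and point unknown-unicast, broadcast and unregistered
 * multicast flooding at it, then write the switch general configuration
 * (SGCR) register.
 */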
static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
{
	char sfgc_pl[MLXSW_REG_SFGC_LEN];
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	char *sftr_pl;
	int err;

	/* Configure a flooding table, which includes only CPU port. */
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;
	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
			    MLXSW_PORT_CPU_PORT, true);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
	kfree(sftr_pl);
	if (err)
		return err;

	/* Flood different packet types using the flooding table. */
	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_BROADCAST,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sgcr_pack(sgcr_pl, true);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
}

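/* Basic trap group setup used early during initialization: configure the
 * EMAD trap group on all switch partitions (no policer, default
 * priority/TC) so register transactions can be completed before the
 * per-device trap groups exist.
 */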
static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

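/* Main init flow: record the core/bus handles, read the switch base MAC,
 * create the ports, register traps and initialize flooding. Teardown on
 * error is done in reverse order.
 */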
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
		goto err_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_listener_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}

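/* Teardown mirrors mlxsw_sx_init(): unregister traps, then remove ports. */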
static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_ports_remove(mlxsw_sx);
}

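/* Resource profile passed to the device during initialization. Only
 * fields whose matching used_* flag is set are actually configured.
 */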
static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = 7000,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_max_system_port = 1,
	.max_system_port = 48000,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.max_regions = 400,
	.used_flood_tables = 1,
	.max_flood_tables = 2,
	.max_vid_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.used_max_ib_mc = 1,
	.max_ib_mc = 6,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		},
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_IB,
		}
	},
};

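/* Driver ops registered with the mlxsw core: init/fini, basic trap group
 * setup, TX header construction and the devlink port type handler.
 */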
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind = mlxsw_sx_driver_name,
	.priv_size = sizeof(struct mlxsw_sx),
	.init = mlxsw_sx_init,
	.fini = mlxsw_sx_fini,
	.basic_trap_groups_set = mlxsw_sx_basic_trap_groups_set,
	.txhdr_construct = mlxsw_sx_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sx_config_profile,
	.port_type_set = mlxsw_sx_port_type_set,
};

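/* PCI IDs handled by this driver. The pci_driver below is registered
 * through mlxsw_pci_driver_register(), which is expected to supply the
 * common mlxsw probe/remove callbacks.
 */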
static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, },
};

static struct pci_driver mlxsw_sx_pci_driver = {
	.name = mlxsw_sx_driver_name,
	.id_table = mlxsw_sx_pci_id_table,
};

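/* Module entry points: register the core driver first, then the PCI
 * driver; unwind in reverse order on failure and on module exit.
 */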
static int __init mlxsw_sx_module_init(void)
{
	int err;

	err = mlxsw_core_driver_register(&mlxsw_sx_driver);
	if (err)
		return err;

	err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
	return err;
}

static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}

module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);