Merge branch 'mlx5-100G-fixes'
Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes#2 for 4.7-rc

The following series provides one-liner fixes for the mlx5 driver plus one
medium patch to reorganize ethtool counters reporting.

Highlights:
- Added MODIFY_FLOW_TABLE to command strings table
- Add ConnectX-5 PCIe 4.0 to list of supported devices
- Rename ASYNC_EVENTS enum
- Enable BlueFlame only when supported by device
- Avoid adding same vxlan port twice
- Report the correct number of PFC counters
- Reorganize ethtool reported counters and remove duplications
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f5074d0ce2

@@ -545,6 +545,7 @@ const char *mlx5_command_str(int command)
         MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
         MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
         MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+        MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
         default: return "unknown command opcode";
         }
 }

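The hunk above extends the driver's opcode-to-string table; with the new entry, a failing MODIFY_FLOW_TABLE command is reported by name rather than as "unknown command opcode". A generic sketch of the case-to-string idiom used here, with illustrative enum values rather than mlx5's real opcodes:

#include <stdio.h>

enum cmd_op { CMD_OP_CREATE_FLOW_TABLE = 1, CMD_OP_MODIFY_FLOW_TABLE = 2 };

#define CMD_STR_CASE(x) case CMD_OP_##x: return #x

static const char *cmd_str(int command)
{
        switch (command) {
        CMD_STR_CASE(CREATE_FLOW_TABLE);
        CMD_STR_CASE(MODIFY_FLOW_TABLE);
        default: return "unknown command opcode";
        }
}

int main(void)
{
        printf("%s\n", cmd_str(CMD_OP_MODIFY_FLOW_TABLE)); /* prints MODIFY_FLOW_TABLE */
        return 0;
}
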
@@ -401,7 +401,7 @@ enum mlx5e_traffic_types
 };
 
 enum {
-        MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+        MLX5E_STATE_ASYNC_EVENTS_ENABLED,
         MLX5E_STATE_OPENED,
         MLX5E_STATE_DESTROYING,
 };

@@ -184,7 +184,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
 #define MLX5E_NUM_SQ_STATS(priv) \
         (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
          test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+        (hweight8(mlx5e_query_pfc_combined(priv)) * \
+         NUM_PPORT_PER_PRIO_PFC_COUNTERS)
 
 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {

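A quick sanity check on the corrected macro above: mlx5e_query_pfc_combined() returns a bitmap of priorities with PFC enabled, so the number of reported strings must be the number of set bits multiplied by the per-priority PFC counter count, not the bit count alone. A tiny standalone example with made-up values (the mask and the per-priority count are assumptions for illustration):

#include <stdio.h>

int main(void)
{
        unsigned long pfc_combined = 0x29; /* example: PFC enabled on priorities 0, 3 and 5 */
        int counters_per_prio = 5;         /* stand-in for NUM_PPORT_PER_PRIO_PFC_COUNTERS */

        /* The old macro would have reported 3 strings; the fixed one reports 3 * 5 = 15. */
        printf("%d\n", __builtin_popcountl(pfc_combined) * counters_per_prio);
        return 0;
}
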
@@ -211,42 +213,41 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
 
         /* SW counters */
         for (i = 0; i < NUM_SW_COUNTERS; i++)
-                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
 
         /* Q counters */
         for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-                strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+                strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
 
         /* VPORT counters */
         for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                       vport_stats_desc[i].name);
+                       vport_stats_desc[i].format);
 
         /* PPORT counters */
         for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                       pport_802_3_stats_desc[i].name);
+                       pport_802_3_stats_desc[i].format);
 
         for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                       pport_2863_stats_desc[i].name);
+                       pport_2863_stats_desc[i].format);
 
         for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                       pport_2819_stats_desc[i].name);
+                       pport_2819_stats_desc[i].format);
 
         for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-                        sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                                prio,
-                                pport_per_prio_traffic_stats_desc[i].name);
+                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                pport_per_prio_traffic_stats_desc[i].format, prio);
         }
 
         pfc_combined = mlx5e_query_pfc_combined(priv);
         for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-                        sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                                prio, pport_per_prio_pfc_stats_desc[i].name);
+                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                pport_per_prio_pfc_stats_desc[i].format, prio);
                 }
         }
 

@@ -256,16 +257,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
         /* per channel counters */
         for (i = 0; i < priv->params.num_channels; i++)
                 for (j = 0; j < NUM_RQ_STATS; j++)
-                        sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
-                                rq_stats_desc[j].name);
+                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                rq_stats_desc[j].format, i);
 
         for (tc = 0; tc < priv->params.num_tc; tc++)
                 for (i = 0; i < priv->params.num_channels; i++)
                         for (j = 0; j < NUM_SQ_STATS; j++)
                                 sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                                        "tx%d_%s",
-                                        priv->channeltc_to_txq_map[i][tc],
-                                        sq_stats_desc[j].name);
+                                        sq_stats_desc[j].format,
+                                        priv->channeltc_to_txq_map[i][tc]);
 }
 
 static void mlx5e_get_strings(struct net_device *dev,

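The string-filling changes in this function all follow one pattern: each counter descriptor now carries a printf-style format (for example "rx%d_packets") instead of a bare name, and the ring or priority index is substituted when the ethtool strings are emitted, so the same table drives both naming and layout. A minimal standalone sketch of that pattern, using simplified stand-in types and descriptors rather than the driver's real ones:

#include <stdio.h>

#define ETH_GSTRING_LEN 32

struct counter_desc {
        char format[ETH_GSTRING_LEN];
};

static const struct counter_desc rq_desc[] = {
        { "rx%d_packets" },
        { "rx%d_bytes" },
};

int main(void)
{
        char data[4][ETH_GSTRING_LEN];
        int idx = 0, ring, j;

        /* Two rings, two counters per ring: the index is filled in at string time. */
        for (ring = 0; ring < 2; ring++)
                for (j = 0; j < 2; j++)
                        sprintf(data[idx++], rq_desc[j].format, ring);

        for (j = 0; j < idx; j++)
                printf("%s\n", data[j]); /* rx0_packets, rx0_bytes, rx1_packets, rx1_bytes */
        return 0;
}
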
@@ -105,11 +105,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                 s->rx_packets   += rq_stats->packets;
                 s->rx_bytes     += rq_stats->bytes;
-                s->lro_packets  += rq_stats->lro_packets;
-                s->lro_bytes    += rq_stats->lro_bytes;
+                s->rx_lro_packets += rq_stats->lro_packets;
+                s->rx_lro_bytes += rq_stats->lro_bytes;
                 s->rx_csum_none += rq_stats->csum_none;
-                s->rx_csum_sw   += rq_stats->csum_sw;
-                s->rx_csum_inner += rq_stats->csum_inner;
+                s->rx_csum_complete += rq_stats->csum_complete;
+                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                 s->rx_wqe_err   += rq_stats->wqe_err;
                 s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                 s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;

@@ -122,24 +122,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                 s->tx_packets           += sq_stats->packets;
                 s->tx_bytes             += sq_stats->bytes;
-                s->tso_packets          += sq_stats->tso_packets;
-                s->tso_bytes            += sq_stats->tso_bytes;
-                s->tso_inner_packets    += sq_stats->tso_inner_packets;
-                s->tso_inner_bytes      += sq_stats->tso_inner_bytes;
+                s->tx_tso_packets       += sq_stats->tso_packets;
+                s->tx_tso_bytes         += sq_stats->tso_bytes;
+                s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+                s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                 s->tx_queue_stopped     += sq_stats->stopped;
                 s->tx_queue_wake        += sq_stats->wake;
                 s->tx_queue_dropped     += sq_stats->dropped;
-                s->tx_csum_inner        += sq_stats->csum_offload_inner;
-                tx_offload_none         += sq_stats->csum_offload_none;
+                s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+                tx_offload_none         += sq_stats->csum_none;
                 }
         }
 
         /* Update calculated offload counters */
-        s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-        s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
-                             s->rx_csum_sw;
+        s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+        s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
 
-        s->link_down_events = MLX5_GET(ppcnt_reg,
+        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                        priv->stats.pport.phy_counters,
                                        counter_set.phys_layer_cntrs.link_down_events);
 }

@@ -244,7 +243,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 {
         struct mlx5e_priv *priv = vpriv;
 
-        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                 return;
 
         switch (event) {

@@ -260,12 +259,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
 }
 
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
         synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 

@@ -580,7 +579,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
         void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
         int err;
 
-        err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+        err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
         if (err)
                 return err;
 

@@ -689,7 +689,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
         if (is_first_ethertype_ip(skb)) {
                 skb->ip_summed = CHECKSUM_COMPLETE;
                 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
-                rq->stats.csum_sw++;
+                rq->stats.csum_complete++;
                 return;
         }
 

@@ -699,7 +699,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                 if (cqe_is_tunneled(cqe)) {
                         skb->csum_level = 1;
                         skb->encapsulation = 1;
-                        rq->stats.csum_inner++;
+                        rq->stats.csum_unnecessary_inner++;
                 }
                 return;
         }

@@ -42,9 +42,11 @@
         be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
 
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
 
 struct counter_desc {
-        char            name[ETH_GSTRING_LEN];
+        char            format[ETH_GSTRING_LEN];
         int             offset; /* Byte offset */
 };
 

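The new RX/TX declare macros above rely on plain C string-literal concatenation: "rx%d_" #fld collapses into a single literal such as "rx%d_packets", which then sits in the descriptor's format field. A small self-contained illustration with hypothetical struct and macro names (only the concatenation trick itself is taken from the patch):

#include <stdio.h>
#include <stddef.h>

#define DECLARE_RX_STAT(type, fld) "rx%d_" #fld, offsetof(type, fld)

struct rq_stats {
        unsigned long long packets;
        unsigned long long bytes;
};

struct desc {
        const char *format;
        size_t offset;
};

int main(void)
{
        struct desc d = { DECLARE_RX_STAT(struct rq_stats, bytes) };

        /* e.g. format="rx%d_bytes" offset=8 on a typical 64-bit build */
        printf("format=\"%s\" offset=%zu\n", d.format, d.offset);
        return 0;
}
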
@@ -53,18 +55,18 @@ struct mlx5e_sw_stats {
         u64 rx_bytes;
         u64 tx_packets;
         u64 tx_bytes;
-        u64 tso_packets;
-        u64 tso_bytes;
-        u64 tso_inner_packets;
-        u64 tso_inner_bytes;
-        u64 lro_packets;
-        u64 lro_bytes;
-        u64 rx_csum_good;
+        u64 tx_tso_packets;
+        u64 tx_tso_bytes;
+        u64 tx_tso_inner_packets;
+        u64 tx_tso_inner_bytes;
+        u64 rx_lro_packets;
+        u64 rx_lro_bytes;
+        u64 rx_csum_unnecessary;
         u64 rx_csum_none;
-        u64 rx_csum_sw;
-        u64 rx_csum_inner;
-        u64 tx_csum_offload;
-        u64 tx_csum_inner;
+        u64 rx_csum_complete;
+        u64 rx_csum_unnecessary_inner;
+        u64 tx_csum_partial;
+        u64 tx_csum_partial_inner;
         u64 tx_queue_stopped;
         u64 tx_queue_wake;
         u64 tx_queue_dropped;

@@ -76,7 +78,7 @@ struct mlx5e_sw_stats {
         u64 rx_cqe_compress_pkts;
 
         /* Special handling counters */
-        u64 link_down_events;
+        u64 link_down_events_phy;
 };
 
 static const struct counter_desc sw_stats_desc[] = {

@@ -84,18 +86,18 @@ static const struct counter_desc sw_stats_desc[] = {
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },

@@ -105,7 +107,7 @@ static const struct counter_desc sw_stats_desc[] = {
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
 
 struct mlx5e_qcounter_stats {

@@ -125,12 +127,6 @@ struct mlx5e_vport_stats {
 };
 
 static const struct counter_desc vport_stats_desc[] = {
-        { "rx_vport_error_packets",
-                VPORT_COUNTER_OFF(received_errors.packets) },
-        { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
-        { "tx_vport_error_packets",
-                VPORT_COUNTER_OFF(transmit_errors.packets) },
-        { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
         { "rx_vport_unicast_packets",
                 VPORT_COUNTER_OFF(received_eth_unicast.packets) },
         { "rx_vport_unicast_bytes",

@@ -192,94 +188,68 @@ struct mlx5e_pport_stats {
 };
 
 static const struct counter_desc pport_802_3_stats_desc[] = {
-        { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
-        { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
-        { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
-        { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
-        { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
-        { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
-        { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
-        { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
-        { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
-        { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
-        { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
-        { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
-        { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
-        { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
-        { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
-        { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
-        { "unsupported_op_rx",
-                PPORT_802_3_OFF(a_unsupported_opcodes_received) },
-        { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
-        { "pause_ctrl_tx",
-                PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+        { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+        { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+        { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+        { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+        { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+        { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+        { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+        { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+        { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+        { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+        { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+        { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+        { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+        { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+        { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+        { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+        { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+        { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
 };
 
 static const struct counter_desc pport_2863_stats_desc[] = {
-        { "in_octets", PPORT_2863_OFF(if_in_octets) },
-        { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
-        { "in_discards", PPORT_2863_OFF(if_in_discards) },
-        { "in_errors", PPORT_2863_OFF(if_in_errors) },
-        { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
-        { "out_octets", PPORT_2863_OFF(if_out_octets) },
-        { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
-        { "out_discards", PPORT_2863_OFF(if_out_discards) },
-        { "out_errors", PPORT_2863_OFF(if_out_errors) },
-        { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
-        { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
-        { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
-        { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+        { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+        { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+        { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
 };
 
 static const struct counter_desc pport_2819_stats_desc[] = {
-        { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
-        { "octets", PPORT_2819_OFF(ether_stats_octets) },
-        { "pkts", PPORT_2819_OFF(ether_stats_pkts) },
-        { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
-        { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
-        { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
-        { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
-        { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
-        { "fragments", PPORT_2819_OFF(ether_stats_fragments) },
-        { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
-        { "collisions", PPORT_2819_OFF(ether_stats_collisions) },
-        { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
-        { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
-        { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
-        { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
-        { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
-        { "p1024to1518octets",
-                PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
-        { "p1519to2047octets",
-                PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
-        { "p2048to4095octets",
-                PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
-        { "p4096to8191octets",
-                PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
-        { "p8192to10239octets",
-                PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+        { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+        { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+        { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+        { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+        { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+        { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+        { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+        { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+        { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+        { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+        { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+        { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
 };
 
 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
-        { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
-        { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
-        { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
-        { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+        { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+        { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
 };
 
 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
-        { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
-        { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
-        { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
-        { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
-        { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+        { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+        { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+        { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+        { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+        { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
 struct mlx5e_rq_stats {
         u64 packets;
         u64 bytes;
-        u64 csum_sw;
-        u64 csum_inner;
+        u64 csum_complete;
+        u64 csum_unnecessary_inner;
         u64 csum_none;
         u64 lro_packets;
         u64 lro_bytes;

@@ -292,19 +262,19 @@ struct mlx5e_rq_stats {
 };
 
 static const struct counter_desc rq_stats_desc[] = {
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
 };
 
 struct mlx5e_sq_stats {

@@ -315,28 +285,28 @@ struct mlx5e_sq_stats {
         u64 tso_bytes;
         u64 tso_inner_packets;
         u64 tso_inner_bytes;
-        u64 csum_offload_inner;
+        u64 csum_partial_inner;
         u64 nop;
         /* less likely accessed in data path */
-        u64 csum_offload_none;
+        u64 csum_none;
         u64 stopped;
         u64 wake;
         u64 dropped;
 };
 
 static const struct counter_desc sq_stats_desc[] = {
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
-        { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
 };
 
 #define NUM_SW_COUNTERS                 ARRAY_SIZE(sw_stats_desc)

@@ -192,12 +192,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                 if (skb->encapsulation) {
                         eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                           MLX5_ETH_WQE_L4_INNER_CSUM;
-                        sq->stats.csum_offload_inner++;
+                        sq->stats.csum_partial_inner++;
                 } else {
                         eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                 }
         } else
-                sq->stats.csum_offload_none++;
+                sq->stats.csum_none++;
 
         if (sq->cc != sq->prev_cc) {
                 sq->prev_cc = sq->cc;

@@ -1508,8 +1508,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
         { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},  /* ConnectX-4 VF */
         { PCI_VDEVICE(MELLANOX, 0x1015) },                      /* ConnectX-4LX */
         { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},  /* ConnectX-4LX VF */
-        { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5 */
+        { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5, PCIe 3.0 */
         { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},  /* ConnectX-5 VF */
+        { PCI_VDEVICE(MELLANOX, 0x1019) },                      /* ConnectX-5, PCIe 4.0 */
         { 0, }
 };
 

@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
         struct mlx5e_vxlan *vxlan;
         int err;
 
+        if (mlx5e_vxlan_lookup_port(priv, port))
+                goto free_work;
+
         if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
                 goto free_work;
 

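The added lookup above is the usual guard against double insertion: return early if the port is already tracked before issuing the firmware add command, so repeated notifications for the same VXLAN port do not create duplicate entries. A generic sketch of the same check-before-add pattern over a plain array (hypothetical helpers, not the driver's actual data structure):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 16

struct port_table {
        unsigned short ports[MAX_PORTS];
        int count;
};

static bool table_contains(const struct port_table *t, unsigned short port)
{
        for (int i = 0; i < t->count; i++)
                if (t->ports[i] == port)
                        return true;
        return false;
}

/* Refuse duplicates up front, before doing any expensive add work. */
static bool table_add_port(struct port_table *t, unsigned short port)
{
        if (table_contains(t, port) || t->count == MAX_PORTS)
                return false;
        t->ports[t->count++] = port;
        return true;
}

int main(void)
{
        struct port_table t = { .count = 0 };

        printf("%d %d\n", table_add_port(&t, 4789), table_add_port(&t, 4789)); /* prints: 1 0 */
        return 0;
}
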