Merge branch 'mlx5-next'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 update 2016-11-15

This series contains four humble mlx5 features.

From Gal,
 - Add support for PCIe statistics and expose them in ethtool

From Huy,
 - Add support for port module event reporting and statistics
 - Add support for setting the driver version in FW (for display purposes only)

From Mohamad,
 - Extend the command interface cache flexibility

This series was generated against commit
6a02f5eb6a ("Merge branch 'mlxsw-i2c")

V2:
 - Changed plain "unsigned" to "unsigned int"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit 511d5d5b65 by David S. Miller, 2016-11-18 12:08:58 -05:00.
12 changed files with 454 additions and 84 deletions

drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@@ -53,14 +53,6 @@ enum {
CMD_MODE_EVENTS
};
enum {
NUM_LONG_LISTS = 2,
NUM_MED_LISTS = 64,
LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
MLX5_CMD_DATA_BLOCK_SIZE,
MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
enum {
MLX5_CMD_DELIVERY_STAT_OK = 0x0,
MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
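The enum removed above is the crux of the old design: only two pre-allocated inbox sizes existed. LONG_LIST_SIZE was sized for the worst-case page-supplying command; assuming 4 KiB pages and a 512-byte MLX5_CMD_DATA_BLOCK_SIZE (stated here only to make the arithmetic concrete), it works out to (2^31 / 4096) * 8 + 16 + 512 = 4194832 bytes, so each of the two NUM_LONG_LISTS entries carried a roughly 4 MB mailbox chain, while anything between MED_LIST_SIZE (528 bytes) and that size had no better fit. The five graded pools introduced below replace this.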
@@ -1372,10 +1364,10 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
unsigned long flags;
if (msg->cache) {
spin_lock_irqsave(&msg->cache->lock, flags);
list_add_tail(&msg->list, &msg->cache->head);
spin_unlock_irqrestore(&msg->cache->lock, flags);
if (msg->parent) {
spin_lock_irqsave(&msg->parent->lock, flags);
list_add_tail(&msg->list, &msg->parent->head);
spin_unlock_irqrestore(&msg->parent->lock, flags);
} else {
mlx5_free_cmd_msg(dev, msg);
}
@@ -1472,30 +1464,37 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
gfp_t gfp)
{
struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
struct cmd_msg_cache *ch = NULL;
struct mlx5_cmd *cmd = &dev->cmd;
struct cache_ent *ent = NULL;
int i;
if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
ent = &cmd->cache.large;
else if (in_size > 16 && in_size <= MED_LIST_SIZE)
ent = &cmd->cache.med;
if (in_size <= 16)
goto cache_miss;
if (ent) {
spin_lock_irq(&ent->lock);
if (!list_empty(&ent->head)) {
msg = list_entry(ent->head.next, typeof(*msg), list);
/* For cached lists, we must explicitly state what is
* the real size
*/
msg->len = in_size;
list_del(&msg->list);
for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
ch = &cmd->cache[i];
if (in_size > ch->max_inbox_size)
continue;
spin_lock_irq(&ch->lock);
if (list_empty(&ch->head)) {
spin_unlock_irq(&ch->lock);
continue;
}
spin_unlock_irq(&ent->lock);
msg = list_entry(ch->head.next, typeof(*msg), list);
/* For cached lists, we must explicitly state what is
* the real size
*/
msg->len = in_size;
list_del(&msg->list);
spin_unlock_irq(&ch->lock);
break;
}
if (IS_ERR(msg))
msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
if (!IS_ERR(msg))
return msg;
cache_miss:
msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
return msg;
}
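A minimal user-space sketch of the new first-fit lookup may be useful; the pool limits mirror the cmd_cache_ent_size[] initializers added later in this file, and the 512-byte MLX5_CMD_DATA_BLOCK_SIZE is an assumption made so the example is self-contained:

#include <stdio.h>

#define DATA_BLOCK 512	/* assumed value of MLX5_CMD_DATA_BLOCK_SIZE */

/* Mirrors cmd_cache_ent_size[]: pools are ordered smallest to largest,
 * so the first pool that fits is also the tightest fit.
 */
static const unsigned int max_inbox_size[5] = {
	16 + DATA_BLOCK,
	16 + DATA_BLOCK * 2,
	16 + DATA_BLOCK * 16,
	16 + DATA_BLOCK * 256,
	16 + DATA_BLOCK * 512,
};

/* Return the pool index alloc_msg() would probe first, or -1 when the
 * request bypasses the caches: inboxes of at most 16 bytes fit inline
 * in the command entry, and oversized ones always allocate fresh.
 */
static int pick_cache(unsigned int in_size)
{
	int i;

	if (in_size <= 16)
		return -1;
	for (i = 0; i < 5; i++)
		if (in_size <= max_inbox_size[i])
			return i;
	return -1;
}

int main(void)
{
	/* prints "0 2 -1": 100 bytes take the smallest pool, 2000 bytes
	 * the 16-block pool, and 1 MiB exceeds every pool.
	 */
	printf("%d %d %d\n", pick_cache(100), pick_cache(2000),
	       pick_cache(1 << 20));
	return 0;
}

Note that the kernel loop also skips a pool whose free list is empty and moves on to the next larger one; the sketch models only the size selection.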
@@ -1593,58 +1592,56 @@ EXPORT_SYMBOL(mlx5_cmd_exec_cb);
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct cmd_msg_cache *ch;
struct mlx5_cmd_msg *msg;
struct mlx5_cmd_msg *n;
int i;
list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
list_del(&msg->list);
mlx5_free_cmd_msg(dev, msg);
}
list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
list_del(&msg->list);
mlx5_free_cmd_msg(dev, msg);
for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
ch = &dev->cmd.cache[i];
list_for_each_entry_safe(msg, n, &ch->head, list) {
list_del(&msg->list);
mlx5_free_cmd_msg(dev, msg);
}
}
}
static int create_msg_cache(struct mlx5_core_dev *dev)
static unsigned int cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
512, 32, 16, 8, 2
};
static unsigned int cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
16 + MLX5_CMD_DATA_BLOCK_SIZE,
16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};
static void create_msg_cache(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct cmd_msg_cache *ch;
struct mlx5_cmd_msg *msg;
int err;
int i;
int k;
spin_lock_init(&cmd->cache.large.lock);
INIT_LIST_HEAD(&cmd->cache.large.head);
spin_lock_init(&cmd->cache.med.lock);
INIT_LIST_HEAD(&cmd->cache.med.head);
for (i = 0; i < NUM_LONG_LISTS; i++) {
msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
/* Initialize and fill the caches with initial entries */
for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
ch = &cmd->cache[k];
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->head);
ch->num_ent = cmd_cache_num_ent[k];
ch->max_inbox_size = cmd_cache_ent_size[k];
for (i = 0; i < ch->num_ent; i++) {
msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
ch->max_inbox_size, 0);
if (IS_ERR(msg))
break;
msg->parent = ch;
list_add_tail(&msg->list, &ch->head);
}
msg->cache = &cmd->cache.large;
list_add_tail(&msg->list, &cmd->cache.large.head);
}
for (i = 0; i < NUM_MED_LISTS; i++) {
msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
}
msg->cache = &cmd->cache.med;
list_add_tail(&msg->list, &cmd->cache.med.head);
}
return 0;
ex_err:
destroy_msg_cache(dev);
return err;
}
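create_msg_cache can now return void because a failed pre-allocation is no longer fatal: the __GFP_NOWARN allocations simply stop filling that pool, and alloc_msg falls back to a fresh mlx5_alloc_cmd_msg at runtime. Assuming MLX5_CMD_DATA_BLOCK_SIZE is 512 bytes, a fully populated set of pools holds roughly 512*528 + 32*1040 + 16*8208 + 8*131088 + 2*262160, about 2 MB of inbox space, weighted toward many small entries where command traffic concentrates.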
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
@@ -1767,11 +1764,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->mode = CMD_MODE_POLLING;
err = create_msg_cache(dev);
if (err) {
dev_err(&dev->pdev->dev, "failed to create command cache\n");
goto err_free_page;
}
create_msg_cache(dev);
set_wqname(dev);
cmd->wq = create_singlethread_workqueue(cmd->wq_name);

drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c

@@ -171,9 +171,13 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return NUM_SW_COUNTERS +
MLX5E_NUM_Q_CNTRS(priv) +
NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
NUM_PCIE_COUNTERS +
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv);
MLX5E_NUM_PFC_COUNTERS(priv) +
ARRAY_SIZE(mlx5e_pme_status_desc) +
ARRAY_SIZE(mlx5e_pme_error_desc);
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
/* fallthrough */
@@ -213,6 +217,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].format);
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pcie_perf_stats_desc[i].format);
for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pcie_tas_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -237,6 +249,13 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
}
}
/* port module event counters */
for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -279,6 +298,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_priv *mlx5_priv;
int i, j, tc, prio, idx = 0;
unsigned long pfc_combined;
@@ -314,6 +334,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
pport_2819_stats_desc, i);
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
pcie_perf_stats_desc, i);
for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
pcie_tas_stats_desc, i);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -335,6 +363,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
}
/* port module event counters */
mlx5_priv = &priv->mdev->priv;
for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
mlx5e_pme_status_desc, i);
for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
mlx5e_pme_error_desc, i);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
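One invariant worth keeping in mind when reviewing these hunks: ethtool's interface is positional, so the count from mlx5e_get_sset_count, the string table, and the value array must be built from the same lists in the same order. A sketch of the desc-driven pattern that keeps them aligned (illustrative only, not driver code):

#include <string.h>

#define ETH_GSTRING_LEN 32	/* as in <linux/ethtool.h> */

struct counter_desc {
	char format[ETH_GSTRING_LEN];	/* ethtool counter name */
	int offset;			/* byte offset into a stats struct */
};

/* String pass: returns the updated index so the caller can thread idx
 * through every table, mirroring mlx5e_fill_stats_strings(). One desc
 * table drives the count, the string pass, and the value pass, so the
 * three cannot drift out of sync.
 */
static int fill_strings(char *data, int idx,
			const struct counter_desc *desc, int n)
{
	int i;

	for (i = 0; i < n; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, desc[i].format);
	return idx;
}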

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -290,12 +290,36 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
&qcnt->rx_out_of_buffer);
}
static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
void *out;
u32 *in;
in = mlx5_vzalloc(sz);
if (!in)
return;
out = pcie_stats->pcie_perf_counters;
MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
out = pcie_stats->pcie_tas_counters;
MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
kvfree(in);
}
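The same MPCNT register layout is dumped once per counter group into the two arrays of struct mlx5e_pcie_stats; individual fields can then be pulled out with the PCIE_PERF_GET/PCIE_TAS_GET accessors this series adds in en_stats.h. A sketch, assuming the update above has run:

u32 rx_err     = PCIE_PERF_GET(&priv->stats.pcie, rx_errors);
u32 fatal_msgs = PCIE_TAS_GET(&priv->stats.pcie, fatal_err_msg_sent);

The ethtool path shown earlier reads the same buffers through the offset-based MLX5E_READ_CTR32_BE instead.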
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
mlx5e_update_q_counter(priv);
mlx5e_update_vport_counters(priv);
mlx5e_update_pport_counters(priv);
mlx5e_update_sw_counters(priv);
mlx5e_update_pcie_counters(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)

drivers/net/ethernet/mellanox/mlx5/core/en_stats.h

@@ -39,7 +39,7 @@
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -276,6 +276,32 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
#define PCIE_PERF_OFF(c) \
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_TAS_OFF(c) \
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
#define PCIE_TAS_GET(pcie_stats, c) \
MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
counter_set.pcie_tas_cntrs_grp_data_layout.c)
struct mlx5e_pcie_stats {
__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
__be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
static const struct counter_desc pcie_perf_stats_desc[] = {
{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};
static const struct counter_desc pcie_tas_stats_desc[] = {
{ "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
{ "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
};
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -360,6 +386,8 @@ static const struct counter_desc sq_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_TAS_COUNTERS ARRAY_SIZE(pcie_tas_stats_desc)
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -369,6 +397,7 @@ static const struct counter_desc sq_stats_desc[] = {
NUM_PPORT_2819_COUNTERS + \
NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
NUM_PPORT_PRIO)
#define NUM_PCIE_COUNTERS (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
@@ -377,6 +406,24 @@ struct mlx5e_stats {
struct mlx5e_qcounter_stats qcnt;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
struct mlx5e_pcie_stats pcie;
};
static const struct counter_desc mlx5e_pme_status_desc[] = {
{ "module_plug", 0 },
{ "module_unplug", 8 },
};
static const struct counter_desc mlx5e_pme_error_desc[] = {
{ "module_pwr_budget_exd", 0 }, /* power budget exceed */
{ "module_long_range", 8 }, /* long range for non MLNX cable */
{ "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
{ "module_no_eeprom", 24 }, /* no eeprom/retry time out */
{ "module_enforce_part", 32 }, /* enforce part number list */
{ "module_unknown_id", 40 }, /* unknown identifier */
{ "module_high_temp", 48 }, /* high temperature */
{ "module_bad_shorted", 56 }, /* bad or shorted cable/module */
{ "module_unknown_status", 64 },
};
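The raw numbers in the two tables above are byte offsets into the u64 arrays of struct mlx5_port_module_event_stats (entry i lives at i * 8), which is why the ethtool code pairs them with MLX5E_READ_CTR64_CPU rather than the big-endian register readers. A sketch of what that read boils down to for the "module_plug" counter:

u64 plugs = *(u64 *)((char *)priv->mdev->priv.pme_stats.status_counters +
		     mlx5e_pme_status_desc[0].offset);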
#endif /* __MLX5_EN_STATS_H__ */

drivers/net/ethernet/mellanox/mlx5/core/eq.c

@@ -139,6 +139,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PORT_CHANGE";
case MLX5_EVENT_TYPE_GPIO_EVENT:
return "MLX5_EVENT_TYPE_GPIO_EVENT";
case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
case MLX5_EVENT_TYPE_REMOTE_CONFIG:
return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
@@ -285,6 +287,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
break;
#endif
case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
mlx5_port_module_event(dev, eqe);
break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -480,6 +487,11 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
mlx5_core_is_pf(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
if (MLX5_CAP_GEN(dev, port_module_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
else
mlx5_core_dbg(dev, "port_module_event is not set\n");
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);

drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -175,6 +175,41 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
return err;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
driver_version);
u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
int remaining_size = driver_ver_sz;
char *string;
if (!MLX5_CAP_GEN(dev, driver_version))
return;
string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
strncpy(string, "Linux", remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, ",", remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, DRIVER_NAME, remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, ",", remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, DRIVER_VERSION, remaining_size);
/*Send the command*/
MLX5_SET(set_driver_version_in, in, opcode,
MLX5_CMD_OP_SET_DRIVER_VERSION);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
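The string handed to firmware comes out as "Linux,<DRIVER_NAME>,<DRIVER_VERSION>" (for example "Linux,mlx5_core,3.0-1"; the exact version text depends on the build and is illustrative here), truncated to the 64-byte driver_version field, with remaining_size re-clamped after every append so the strncat chain cannot overrun. A single snprintf would express the same concatenation; a sketch, not the committed code:

snprintf(string, driver_ver_sz, "Linux,%s,%s", DRIVER_NAME, DRIVER_VERSION);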
static int set_dma_caps(struct pci_dev *pdev)
{
int err;
@@ -1015,6 +1050,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_pagealloc_stop;
}
mlx5_set_driver_version(dev);
mlx5_start_health_poll(dev);
err = mlx5_query_hca_caps(dev);

drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h

@@ -81,6 +81,7 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);

drivers/net/ethernet/mellanox/mlx5/core/port.c

@@ -746,3 +746,60 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
*supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
}
static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
"Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */
"Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */
"Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */
};
static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
"Power budget exceeded",
"Long Range for non MLNX cable",
"Bus stuck(I2C or data shorted)",
"No EEPROM/retry timeout",
"Enforce part number list",
"Unknown identifier",
"High Temperature",
"Bad or shorted cable/module",
"Unknown status",
};
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
enum port_module_event_status_type module_status;
enum port_module_event_error_type error_type;
struct mlx5_eqe_port_module *module_event_eqe;
struct mlx5_priv *priv = &dev->priv;
u8 module_num;
module_event_eqe = &eqe->data.port_module;
module_num = module_event_eqe->module;
module_status = module_event_eqe->module_status &
PORT_MODULE_EVENT_MODULE_STATUS_MASK;
error_type = module_event_eqe->error_type &
PORT_MODULE_EVENT_ERROR_TYPE_MASK;
if (module_status < MLX5_MODULE_STATUS_ERROR) {
priv->pme_stats.status_counters[module_status - 1]++;
} else if (module_status == MLX5_MODULE_STATUS_ERROR) {
if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
/* Unknown error type */
error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
priv->pme_stats.error_counters[error_type]++;
}
if (!printk_ratelimit())
return;
if (module_status < MLX5_MODULE_STATUS_ERROR)
mlx5_core_info(dev,
"Port module event: module %u, %s\n",
module_num, mlx5_pme_status[module_status - 1]);
else if (module_status == MLX5_MODULE_STATUS_ERROR)
mlx5_core_info(dev,
"Port module event[error]: module %u, %s, %s\n",
module_num, mlx5_pme_status[module_status - 1],
mlx5_pme_error[error_type]);
}

include/linux/mlx5/device.h

@@ -277,6 +277,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -552,6 +553,15 @@ struct mlx5_eqe_vport_change {
__be32 rsvd1[6];
} __packed;
struct mlx5_eqe_port_module {
u8 reserved_at_0[1];
u8 module;
u8 reserved_at_2[1];
u8 module_status;
u8 reserved_at_4[2];
u8 error_type;
} __packed;
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -565,6 +575,7 @@ union ev_data {
struct mlx5_eqe_page_req req_pages;
struct mlx5_eqe_page_fault page_fault;
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
} __packed;
struct mlx5_eqe {
@@ -1060,6 +1071,11 @@ enum {
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
enum {
MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
};
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)

include/linux/mlx5/driver.h

@@ -121,6 +121,7 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MPCNT = 0x9051,
};
enum {
@@ -208,7 +209,7 @@ struct mlx5_cmd_first {
struct mlx5_cmd_msg {
struct list_head list;
struct cache_ent *cache;
struct cmd_msg_cache *parent;
u32 len;
struct mlx5_cmd_first first;
struct mlx5_cmd_mailbox *next;
@@ -228,17 +229,17 @@ struct mlx5_cmd_debug {
u16 outlen;
};
struct cache_ent {
struct cmd_msg_cache {
/* protect block chain allocations
*/
spinlock_t lock;
struct list_head head;
unsigned int max_inbox_size;
unsigned int num_ent;
};
struct cmd_msg_cache {
struct cache_ent large;
struct cache_ent med;
enum {
MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
@@ -281,7 +282,7 @@ struct mlx5_cmd {
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct pci_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
@@ -498,6 +499,31 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
enum port_module_event_status_type {
MLX5_MODULE_STATUS_PLUGGED = 0x1,
MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
MLX5_MODULE_STATUS_ERROR = 0x3,
MLX5_MODULE_STATUS_NUM = 0x3,
};
enum port_module_event_error_type {
MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
MLX5_MODULE_EVENT_ERROR_UNKNOWN,
MLX5_MODULE_EVENT_ERROR_NUM,
};
struct mlx5_port_module_event_stats {
u64 status_counters[MLX5_MODULE_STATUS_NUM];
u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
@@ -559,6 +585,8 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
struct mlx5_port_module_event_stats pme_stats;
};
enum mlx5_device_state {

include/linux/mlx5/mlx5_ifc.h

@@ -83,6 +83,7 @@ enum {
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
MLX5_CMD_OP_QUERY_ISSI = 0x10a,
MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -824,7 +825,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 early_vf_enable[0x1];
u8 reserved_at_1a9[0x2];
u8 local_ca_ack_delay[0x5];
u8 reserved_at_1af[0x2];
u8 port_module_event[0x1];
u8 reserved_at_1b0[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b2[0x1];
u8 disable_link_up[0x1];
@@ -908,7 +910,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_pg_sz[0x8];
u8 bf[0x1];
u8 reserved_at_261[0x1];
u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
@@ -1755,6 +1757,80 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 life_time_counter_high[0x20];
u8 life_time_counter_low[0x20];
u8 rx_errors[0x20];
u8 tx_errors[0x20];
u8 l0_to_recovery_eieos[0x20];
u8 l0_to_recovery_ts[0x20];
u8 l0_to_recovery_framing[0x20];
u8 l0_to_recovery_retrain[0x20];
u8 crc_error_dllp[0x20];
u8 crc_error_tlp[0x20];
u8 reserved_at_140[0x680];
};
struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
u8 life_time_counter_high[0x20];
u8 life_time_counter_low[0x20];
u8 time_to_boot_image_start[0x20];
u8 time_to_link_image[0x20];
u8 calibration_time[0x20];
u8 time_to_first_perst[0x20];
u8 time_to_detect_state[0x20];
u8 time_to_l0[0x20];
u8 time_to_crs_en[0x20];
u8 time_to_plastic_image_start[0x20];
u8 time_to_iron_image_start[0x20];
u8 perst_handler[0x20];
u8 times_in_l1[0x20];
u8 times_in_l23[0x20];
u8 dl_down[0x20];
u8 config_cycle1usec[0x20];
u8 config_cycle2to7usec[0x20];
u8 config_cycle_8to15usec[0x20];
u8 config_cycle_16_to_63usec[0x20];
u8 config_cycle_64usec[0x20];
u8 correctable_err_msg_sent[0x20];
u8 non_fatal_err_msg_sent[0x20];
u8 fatal_err_msg_sent[0x20];
u8 reserved_at_2e0[0x4e0];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
@@ -2919,6 +2995,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
u8 reserved_at_0[0x7c0];
};
union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
u8 reserved_at_0[0x7c0];
};
union mlx5_ifc_event_auto_bits {
struct mlx5_ifc_comp_event_bits comp_event;
struct mlx5_ifc_dct_events_bits dct_events;
@@ -4004,6 +4086,25 @@ struct mlx5_ifc_query_issi_in_bits {
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_driver_version_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
};
struct mlx5_ifc_set_driver_version_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 reserved_1[0x10];
u8 op_mod[0x10];
u8 reserved_2[0x40];
u8 driver_version[64][0x8];
};
struct mlx5_ifc_query_hca_vport_pkey_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7219,6 +7320,18 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
struct mlx5_ifc_mpcnt_reg_bits {
u8 reserved_at_0[0x8];
u8 pcie_index[0x8];
u8 reserved_at_10[0xa];
u8 grp[0x6];
u8 clr[0x1];
u8 reserved_at_21[0x1f];
union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
};
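As everywhere in mlx5_ifc.h, each member above is a bit array (u8 grp[0x6] is a 6-bit field) and the reserved_at_N suffixes are bit positions. Summing the header fields gives 0x8 + 0x8 + 0xa + 0x6 + 0x1 + 0x1f = 0x40 bits; adding the 0x7c0-bit counter_set union yields 0x800 bits, so MLX5_ST_SZ_BYTES(mpcnt_reg) = 256 and MLX5_ST_SZ_QW(mpcnt_reg) = 32, which is exactly the sizing used for the __be64 dump arrays in struct mlx5e_pcie_stats.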
struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];
@@ -7824,6 +7937,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;

include/linux/mlx5/port.h

@@ -94,6 +94,9 @@ enum mlx5e_link_mode {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);