mlx5-updates-2017-06-16

This series provides some updates and cleanups for the mlx5 core and
netdevice driver.

From Eli Cohen, add a missing event string.
From Or Gerlitz, some checkpatch cleanups.
From Moni, disable HW level LAG when SRIOV is enabled.
From Tariq, a code reuse cleanup in the aRFS flow.
From Itay Aveksis, a typo fix.
From Gal Pressman, ethtool statistics updates and "update stats" deferred work optimizations.
From Majd Dibbiny, fast unload support on kernel shutdown.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJZQv0+AAoJEEg/ir3gV/o+1iQH/15I5Pr9KoCWSTN9aUglRupU
8HmJhkf7Novaro6WtIybgMGkdoNTrmHgyTEngAkRq5a5Ws/LrC/1wLH+lVMDh+Fx
/2a5cfPsK483gHWBtAbasBD8SHnsyTIeVnEhuDsevHQNkz3HGuKOgx5ZHF1sdkHU
bj/QU06LNPKAlMDI/wKod13MB4+AdTFemaJRCCgXFvu/p/EfVvB+TStdOsrxj1kx
lDIwkCykJSJsg38HoLXt7Z12nWwgHGf2De04RukKeJ6C6KTdKcUu5EYbaL9BSZZT
jiIayYjRgeXzNhY4R5yLPc0FkecNIgC90YJShUN3nR3PWa+ytaHpfJQPOS4/AW8=
=Tjmk
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2017-06-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox mlx5 updates and cleanups 2017-06-16

This series provides some updates and cleanups for the mlx5 core and
netdevice driver.

From Eli Cohen, add a missing event string.
From Or Gerlitz, some checkpatch cleanups.
From Moni, disable HW level LAG when SRIOV is enabled.
From Tariq, a code reuse cleanup in the aRFS flow.
From Itay Aveksis, a typo fix.
From Gal Pressman, ethtool statistics updates and "update stats" deferred work optimizations.
From Majd Dibbiny, fast unload support on kernel shutdown.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 273889e306
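Editor's note: much of this series hangs off a new firmware-command execution mode that polls for completion instead of sleeping on the command EQ (mlx5_cmd_exec_polling(), added in the cmd.c hunks below); that is what lets the forced-teardown command run during kernel shutdown. The sketch that follows is only an illustration of how such a caller could look, not code from the series; the ENABLE_HCA opcode and the function name are placeholder assumptions.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Hypothetical caller: issue a firmware command through the polling
 * interface added by this series.  Useful when command-EQ interrupts
 * may not be available (e.g. while the device is being shut down).
 */
static int example_enable_hca_polling(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);

	/* Completion is detected by polling, not by an EQ interrupt. */
	return mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
}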
@@ -439,7 +439,7 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev,
 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
 	u8 atomic_req_8B_endianness_mode =
-		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);
+		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
 
 	/* Check if HW supports 8 bytes standard atomic operations and capable
 	 * of host endianness respond

@@ -274,7 +274,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 }
 EXPORT_SYMBOL_GPL(mlx5_db_free);
 
-
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
 {
 	u64 addr;

@@ -217,7 +217,6 @@ static void free_cmd(struct mlx5_cmd_work_ent *ent)
 	kfree(ent);
 }
 
-
 static int verify_signature(struct mlx5_cmd_work_ent *ent)
 {
 	struct mlx5_cmd_mailbox *next = ent->out->next;
@@ -786,6 +785,8 @@ static void cmd_work_handler(struct work_struct *work)
 	struct mlx5_cmd_layout *lay;
 	struct semaphore *sem;
 	unsigned long flags;
+	bool poll_cmd = ent->polling;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);

@@ -846,7 +847,7 @@ static void cmd_work_handler(struct work_struct *work)
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 	mmiowb();
 	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING) {
+	if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();

@@ -874,7 +875,7 @@ static const char *deliv_status_to_str(u8 status)
 	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
 		return "command input length error";
 	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
-		return "command ouput length error";
+		return "command output length error";
 	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
 		return "reserved fields not cleared";
 	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:

@@ -890,7 +891,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 	struct mlx5_cmd *cmd = &dev->cmd;
 	int err;
 
-	if (cmd->mode == CMD_MODE_POLLING) {
+	if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
 		wait_for_completion(&ent->done);
 	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
 		ent->ret = -ETIMEDOUT;

@@ -918,7 +919,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
 			   mlx5_cmd_cbk_t callback,
 			   void *context, int page_queue, u8 *status,
-			   u8 token)
+			   u8 token, bool force_polling)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;

@@ -936,6 +937,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 		return PTR_ERR(ent);
 
 	ent->token = token;
+	ent->polling = force_polling;
 
 	if (!callback)
 		init_completion(&ent->done);
@@ -1001,7 +1003,6 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf,
 	return err ? err : count;
 }
 
-
 static const struct file_operations fops = {
 	.owner = THIS_MODULE,
 	.open = simple_open,

@@ -1153,7 +1154,7 @@ err_alloc:
 }
 
 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
 {
 	struct mlx5_cmd_mailbox *head = msg->next;
 	struct mlx5_cmd_mailbox *next;

@@ -1537,7 +1538,8 @@ static int is_manage_pages(void *in)
 }
 
 static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
-		    int out_size, mlx5_cmd_cbk_t callback, void *context)
+		    int out_size, mlx5_cmd_cbk_t callback, void *context,
+		    bool force_polling)
 {
 	struct mlx5_cmd_msg *inb;
 	struct mlx5_cmd_msg *outb;

@@ -1582,7 +1584,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 	}
 
 	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-			      pages_queue, &status, token);
+			      pages_queue, &status, token, force_polling);
 	if (err)
 		goto out_out;
 

@@ -1610,7 +1612,7 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 {
 	int err;
 
-	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
 	return err ? : mlx5_cmd_check(dev, in, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_exec);

@@ -1619,10 +1621,22 @@ int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
 		     void *out, int out_size, mlx5_cmd_cbk_t callback,
 		     void *context)
 {
-	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
+	return cmd_exec(dev, in, in_size, out, out_size, callback, context,
+			false);
 }
 EXPORT_SYMBOL(mlx5_cmd_exec_cb);
 
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+			  void *out, int out_size)
+{
+	int err;
+
+	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+
+	return err ? : mlx5_cmd_check(dev, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
 static void destroy_msg_cache(struct mlx5_core_dev *dev)
 {
 	struct cmd_msg_cache *ch;
@@ -168,7 +168,6 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
 	return ret;
 }
 
-
 static ssize_t average_write(struct file *filp, const char __user *buf,
 			     size_t count, loff_t *pos)
 {

@@ -466,7 +465,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
 		return -EINVAL;
 	}
 
-
 	if (is_str)
 		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
 	else

@@ -562,7 +560,6 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
 	rem_res_tree(qp->dbg);
 }
 
-
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 {
 	int err;

@@ -822,7 +822,7 @@ void mlx5e_rx_am(struct mlx5e_rq *rq);
 void mlx5e_rx_am_work(struct work_struct *work);
 struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
 
-void mlx5e_update_stats(struct mlx5e_priv *priv);
+void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
 
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -178,6 +178,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 	struct mlx5_flow_destination dest;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct mlx5_flow_spec *spec;
+	enum mlx5e_traffic_types tt;
 	int err = 0;
 
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);

@@ -187,24 +188,16 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 	}
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	switch (type) {
-	case ARFS_IPV4_TCP:
-		dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
-		break;
-	case ARFS_IPV4_UDP:
-		dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
-		break;
-	case ARFS_IPV6_TCP:
-		dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
-		break;
-	case ARFS_IPV6_UDP:
-		dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
-		break;
-	default:
+	tt = arfs_get_tt(type);
+	if (tt == -EINVAL) {
+		netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
+			   __func__, type);
 		err = -EINVAL;
 		goto out;
 	}
 
+	dest.tir_num = tir[tt].tirn;
+
 	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
 						   &flow_act,
 						   &dest, 1);

@@ -145,7 +145,6 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 	int inlen;
 	void *in;
 
-
 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)

@@ -311,7 +311,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 
 	mutex_lock(&priv->state_lock);
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_update_stats(priv);
+		mlx5e_update_stats(priv, true);
 	channels = &priv->channels;
 	mutex_unlock(&priv->state_lock);
 
@@ -170,7 +170,6 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
-
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
 		rule_p = &priv->fs.vlan.untagged_rule;

@@ -124,7 +124,8 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
 	u8 port_state;
 
 	port_state = mlx5_query_vport_state(mdev,
-		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+					    0);
 
 	if (port_state == VPORT_STATE_UP) {
 		netdev_info(priv->netdev, "Link up\n");

@@ -243,18 +244,14 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
 	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
 
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
 {
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 	int prio;
 	void *out;
-	u32 *in;
-
-	in = kvzalloc(sz, GFP_KERNEL);
-	if (!in)
-		return;
 
 	MLX5_SET(ppcnt_reg, in, local_port, 1);
 

@@ -262,6 +259,9 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
+	if (!full)
+		return;
+
 	out = pstats->RFC_2863_counters;
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
@@ -287,52 +287,55 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
 		mlx5_core_access_reg(mdev, in, sz, out, sz,
 				     MLX5_REG_PPCNT, 0, 0);
 	}
-
-	kvfree(in);
 }
 
 static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
 {
 	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+	int err;
 
 	if (!priv->q_counter)
 		return;
 
-	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
-				      &qcnt->rx_out_of_buffer);
+	err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+	if (err)
+		return;
+
+	qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
 }
 
 static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
 {
 	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
 	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
 	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
 	void *out;
-	u32 *in;
 
 	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
 		return;
 
-	in = kvzalloc(sz, GFP_KERNEL);
-	if (!in)
-		return;
-
 	out = pcie_stats->pcie_perf_counters;
 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	kvfree(in);
 }
 
-void mlx5e_update_stats(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
 {
-	mlx5e_update_pcie_counters(priv);
-	mlx5e_update_pport_counters(priv);
+	if (full)
+		mlx5e_update_pcie_counters(priv);
+	mlx5e_update_pport_counters(priv, full);
 	mlx5e_update_vport_counters(priv);
 	mlx5e_update_q_counter(priv);
 	mlx5e_update_sw_counters(priv);
 }
 
+static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_update_stats(priv, false);
+}
+
 void mlx5e_update_stats_work(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
@@ -3067,7 +3070,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	 */
 	stats->multicast =
 		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
-
 }
 
 static void mlx5e_set_rx_mode(struct net_device *dev)

@@ -3727,7 +3729,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		mlx5_core_warn(mdev, "CQ modiration is not supported\n");
+		mlx5_core_warn(mdev, "CQ moderation is not supported\n");
 
 	return 0;
 }

@@ -3860,7 +3862,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 	/* set CQE compression */
 	params->rx_cqe_compress_def = false;
 	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
 		params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
 
 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);

@@ -4211,7 +4213,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
 	.cleanup_tx = mlx5e_cleanup_nic_tx,
 	.enable = mlx5e_nic_enable,
 	.disable = mlx5e_nic_disable,
-	.update_stats = mlx5e_update_stats,
+	.update_stats = mlx5e_update_ndo_stats,
 	.max_nch = mlx5e_get_max_num_channels,
 	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
 	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,

@@ -1019,7 +1019,6 @@ err_destroy_netdev:
 	mlx5e_destroy_netdev(netdev_priv(netdev));
 	kfree(rpriv);
 	return err;
-
 }
 
 static void

@@ -268,7 +268,7 @@ static const struct counter_desc pport_2819_stats_desc[] = {
 };
 
 static const struct counter_desc pport_phy_statistical_stats_desc[] = {
-	{ "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
 	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
 };
 

@@ -245,7 +245,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			int fsz = skb_frag_size(frag);
 
 			dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
						    DMA_TO_DEVICE);
 			if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
 				return -ENOMEM;
 
@@ -157,6 +157,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_PAGE_FAULT";
 	case MLX5_EVENT_TYPE_PPS_EVENT:
 		return "MLX5_EVENT_TYPE_PPS_EVENT";
+	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
 	case MLX5_EVENT_TYPE_FPGA_ERROR:
 		return "MLX5_EVENT_TYPE_FPGA_ERROR";
 	default:

@@ -189,7 +191,7 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm)
 {
 	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
 	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
-	__raw_writel((__force u32) cpu_to_be32(val), addr);
+	__raw_writel((__force u32)cpu_to_be32(val), addr);
 	/* We still want ordering, just not swabbing, so add a barrier */
 	mb();
 }

@@ -675,7 +677,6 @@ int mlx5_eq_init(struct mlx5_core_dev *dev)
 	return err;
 }
 
-
 void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
 {
 	mlx5_eq_debugfs_cleanup(dev);

@@ -687,7 +688,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 	int err;
 
-
 	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
 	    MLX5_CAP_GEN(dev, vport_group_manager) &&
 	    mlx5_core_is_pf(dev))

@@ -1217,7 +1217,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
 			       vport->vport);
 		return -EPERM;
-
 	}
 
 	esw_vport_cleanup_ingress_rules(esw, vport);

@@ -691,7 +691,7 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
 		goto out;

@@ -1093,7 +1093,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
 	if (err) {
 		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
 		esw->offloads.encap = !encap;
-		(void) esw_create_offloads_fast_fdb_table(esw);
+		(void)esw_create_offloads_fast_fdb_table(esw);
 	}
 	return err;
 }

@@ -104,6 +104,7 @@ struct node_caps {
 	size_t arr_sz;
 	long *caps;
 };
+
 static struct init_tree_node {
 	enum fs_node_type type;
 	struct init_tree_node *children;

@@ -1858,7 +1859,6 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 
 static int init_root_ns(struct mlx5_flow_steering *steering)
 {
-
 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
 	if (!steering->root_ns)
 		goto cleanup;
@@ -195,3 +195,31 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
 	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
+
+int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
+{
+	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+	int force_state;
+	int ret;
+
+	if (!MLX5_CAP_GEN(dev, force_teardown)) {
+		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+		return -EOPNOTSUPP;
+	}
+
+	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+	MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);
+
+	ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		return ret;
+
+	force_state = MLX5_GET(teardown_hca_out, out, force_state);
+	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+		mlx5_core_err(dev, "teardown with force mode failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
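Editor's note: the mlx5_cmd_force_teardown_hca() helper added above is consumed much later in this diff, in the PCI shutdown path (mlx5_try_fast_unload() in the main.c hunks). The condensed sketch below is not new code from the series; it only restates how those pieces fit together, assuming they are combined as shown in the corresponding hunks.

/* Condensed restatement of the fast-unload flow introduced by this series;
 * see the fw.c hunk above and the main.c shutdown hunk further down.
 */
static void example_fast_unload_flow(struct mlx5_core_dev *dev,
				     struct mlx5_priv *priv)
{
	int err = -EOPNOTSUPP;

	if (MLX5_CAP_GEN(dev, force_teardown) &&
	    dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		/* TEARDOWN_HCA with the FORCE_CLOSE profile, executed in
		 * polling mode via mlx5_cmd_exec_polling().
		 */
		err = mlx5_cmd_force_teardown_hca(dev);

	if (!err)
		mlx5_enter_error_state(dev, true);	/* no more commands to FW */
	else
		mlx5_unload_one(dev, priv, false);	/* fall back to full unload */
}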
@@ -111,14 +111,14 @@ static int in_fatal(struct mlx5_core_dev *dev)
 	return 0;
 }
 
-void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 {
 	mutex_lock(&dev->intf_state_mutex);
 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
 		goto unlock;
 
 	mlx5_core_err(dev, "start\n");
-	if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
+	if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
 		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
 		trigger_cmd_completions(dev);
 	}

@@ -61,6 +61,11 @@ struct mlx5_lag {
 	struct lag_tracker tracker;
 	struct delayed_work bond_work;
 	struct notifier_block nb;
+
+	/* Admin state. Allow lag only if allowed is true
+	 * even if network conditions for lag were met
+	 */
+	bool allowed;
 };
 
 /* General purpose, use for short periods of time.

@@ -214,6 +219,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	struct lag_tracker tracker;
 	u8 v2p_port1, v2p_port2;
 	int i, err;
+	bool do_bond;
 
 	if (!dev0 || !dev1)
 		return;

@@ -222,13 +228,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	tracker = ldev->tracker;
 	mutex_unlock(&lag_mutex);
 
-	if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
-		if (mlx5_sriov_is_enabled(dev0) ||
-		    mlx5_sriov_is_enabled(dev1)) {
-			mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
-			return;
-		}
+	do_bond = tracker.is_bonded && ldev->allowed;
 
+	if (do_bond && !mlx5_lag_is_bonded(ldev)) {
 		for (i = 0; i < MLX5_MAX_PORTS; i++)
 			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
 						    MLX5_INTERFACE_PROTOCOL_IB);

@@ -237,7 +239,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 
 		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 		mlx5_nic_vport_enable_roce(dev1);
-	} else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+	} else if (do_bond && mlx5_lag_is_bonded(ldev)) {
 		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
 					       &v2p_port2);
 

@@ -252,7 +254,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 				       "Failed to modify LAG (%d)\n",
 				       err);
 		}
-	} else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+	} else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
 		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 		mlx5_nic_vport_disable_roce(dev1);
 
@@ -411,6 +413,15 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
+static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+{
+	if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
+	    (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
+		return false;
+	else
+		return true;
+}
+
 static struct mlx5_lag *mlx5_lag_dev_alloc(void)
 {
 	struct mlx5_lag *ldev;

@@ -420,6 +431,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void)
 		return NULL;
 
 	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+	ldev->allowed = mlx5_lag_check_prereq(ldev);
 
 	return ldev;
 }

@@ -444,7 +456,9 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	ldev->tracker.netdev_state[fn].link_up = 0;
 	ldev->tracker.netdev_state[fn].tx_enabled = 0;
 
+	ldev->allowed = mlx5_lag_check_prereq(ldev);
 	dev->priv.lag = ldev;
+
 	mutex_unlock(&lag_mutex);
 }

@@ -464,10 +478,10 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
 	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
 
 	dev->priv.lag = NULL;
+	ldev->allowed = mlx5_lag_check_prereq(ldev);
 	mutex_unlock(&lag_mutex);
 }
 
-
 /* Must be called with intf_mutex held */
 void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 {

@@ -543,6 +557,44 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_lag_is_active);
 
+static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
+{
+	struct mlx5_lag *ldev;
+	int ret = 0;
+	bool lag_active;
+
+	mlx5_dev_list_lock();
+
+	ldev = mlx5_lag_dev_get(dev);
+	if (!ldev) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+	lag_active = mlx5_lag_is_bonded(ldev);
+	if (!mlx5_lag_check_prereq(ldev) && allow) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (ldev->allowed == allow)
+		goto unlock;
+	ldev->allowed = allow;
+	if ((lag_active && !allow) || allow)
+		mlx5_do_bond(ldev);
+unlock:
+	mlx5_dev_list_unlock();
+	return ret;
+}
+
+int mlx5_lag_forbid(struct mlx5_core_dev *dev)
+{
+	return mlx5_lag_set_state(dev, false);
+}
+
+int mlx5_lag_allow(struct mlx5_core_dev *dev)
+{
+	return mlx5_lag_set_state(dev, true);
+}
+
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 {
 	struct net_device *ndev = NULL;

@@ -586,4 +638,3 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	/* If bonded, we do not add an IB device for PF1. */
 	return false;
 }
-
@@ -356,12 +356,11 @@ static void mlx5_disable_msix(struct mlx5_core_dev *dev)
 	kfree(priv->msix_arr);
 }
 
-struct mlx5_reg_host_endianess {
+struct mlx5_reg_host_endianness {
 	u8 he;
 	u8 rsvd[15];
 };
 
-
 #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
 
 enum {

@@ -475,7 +474,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
 
 	req_endianness =
 		MLX5_CAP_ATOMIC(dev,
-				supported_atomic_req_8B_endianess_mode_1);
+				supported_atomic_req_8B_endianness_mode_1);
 
 	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
 		return 0;

@@ -487,7 +486,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
 	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
 
 	/* Set requestor to host endianness */
-	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
+	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
 		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
 
 	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

@@ -562,8 +561,8 @@ query_ex:
 
 static int set_hca_ctrl(struct mlx5_core_dev *dev)
 {
-	struct mlx5_reg_host_endianess he_in;
-	struct mlx5_reg_host_endianess he_out;
+	struct mlx5_reg_host_endianness he_in;
+	struct mlx5_reg_host_endianness he_out;
 	int err;
 
 	if (!mlx5_core_is_pf(dev))

@@ -1419,7 +1418,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	dev_info(&pdev->dev, "%s was called\n", __func__);
 
-	mlx5_enter_error_state(dev);
+	mlx5_enter_error_state(dev, false);
 	mlx5_unload_one(dev, priv, false);
 	/* In case of kernel call drain the health wq */
 	if (state) {
@@ -1506,15 +1505,43 @@ static const struct pci_error_handlers mlx5_err_handler = {
 	.resume = mlx5_pci_resume
 };
 
+static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
+{
+	int ret;
+
+	if (!MLX5_CAP_GEN(dev, force_teardown)) {
+		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
+		return -EAGAIN;
+	}
+
+	ret = mlx5_cmd_force_teardown_hca(dev);
+	if (ret) {
+		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+		return ret;
+	}
+
+	mlx5_enter_error_state(dev, true);
+
+	return 0;
+}
+
 static void shutdown(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct mlx5_priv *priv = &dev->priv;
+	int err;
 
 	dev_info(&pdev->dev, "Shutdown was called\n");
 	/* Notify mlx5 clients that the kernel is being shut down */
 	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
-	mlx5_unload_one(dev, priv, false);
+	err = mlx5_try_fast_unload(dev);
+	if (err)
+		mlx5_unload_one(dev, priv, false);
 	mlx5_pci_disable_device(dev);
 }
 
@@ -83,12 +83,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
 int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 		     unsigned long param);
 void mlx5_core_page_fault(struct mlx5_core_dev *dev,
 			  struct mlx5_pagefault *pfault);
 void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
 void mlx5_disable_device(struct mlx5_core_dev *dev);
 void mlx5_recover_device(struct mlx5_core_dev *dev);
 int mlx5_sriov_init(struct mlx5_core_dev *dev);

@@ -167,4 +168,7 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
 		MLX5_CAP_GEN(dev, lag_master);
 }
 
+int mlx5_lag_allow(struct mlx5_core_dev *dev);
+int mlx5_lag_forbid(struct mlx5_core_dev *dev);
+
 #endif /* __MLX5_CORE_H__ */

@@ -403,7 +403,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 	for (i = 0; i < num_claimed; i++)
 		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
 
-
 	if (nclaimed)
 		*nclaimed = num_claimed;
 

@@ -30,7 +30,6 @@
  * SOFTWARE.
  */
 
-
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/mlx5/cmd.h>

@@ -519,23 +518,3 @@ int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
 }
 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-
-int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
-				  u32 *out_of_buffer)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
-	void *out;
-	int err;
-
-	out = kvzalloc(outlen, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
-	if (!err)
-		*out_of_buffer = MLX5_GET(query_q_counter_out, out,
-					  out_of_buffer);
-
-	kfree(out);
-	return err;
-}
@@ -175,15 +175,20 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	if (!mlx5_core_is_pf(dev))
 		return -EPERM;
 
-	if (num_vfs && mlx5_lag_is_active(dev)) {
-		mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
-		return -EINVAL;
+	if (num_vfs) {
+		int ret;
+
+		ret = mlx5_lag_forbid(dev);
+		if (ret && (ret != -ENODEV))
+			return ret;
 	}
 
-	if (num_vfs)
+	if (num_vfs) {
 		err = mlx5_sriov_enable(pdev, num_vfs);
-	else
+	} else {
 		mlx5_sriov_disable(pdev);
+		mlx5_lag_allow(dev);
+	}
 
 	return err ? err : num_vfs;
 }
@@ -817,6 +817,7 @@ struct mlx5_cmd_work_ent {
 	u64 ts1;
 	u64 ts2;
 	u16 op;
+	bool polling;
 };
 
 struct mlx5_pas {

@@ -915,6 +916,8 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
 		     void *out, int out_size, mlx5_cmd_cbk_t callback,
 		     void *context);
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+			  void *out, int out_size);
 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
 
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);

@@ -661,9 +661,9 @@ enum {
 struct mlx5_ifc_atomic_caps_bits {
 	u8 reserved_at_0[0x40];
 
-	u8 atomic_req_8B_endianess_mode[0x2];
+	u8 atomic_req_8B_endianness_mode[0x2];
 	u8 reserved_at_42[0x4];
-	u8 supported_atomic_req_8B_endianess_mode_1[0x1];
+	u8 supported_atomic_req_8B_endianness_mode_1[0x1];
 
 	u8 reserved_at_47[0x19];
 

@@ -801,7 +801,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 max_indirection[0x8];
 	u8 fixed_buffer_size[0x1];
 	u8 log_max_mrw_sz[0x7];
-	u8 reserved_at_110[0x2];
+	u8 force_teardown[0x1];
+	u8 reserved_at_111[0x1];
 	u8 log_max_bsf_list_size[0x6];
 	u8 umr_extended_translation_offset[0x1];
 	u8 null_mkey[0x1];

@@ -3094,18 +3095,25 @@ struct mlx5_ifc_tsar_element_bits {
 	u8 reserved_at_10[0x10];
 };
 
+enum {
+	MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
+	MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
+};
+
 struct mlx5_ifc_teardown_hca_out_bits {
 	u8 status[0x8];
 	u8 reserved_at_8[0x18];
 
 	u8 syndrome[0x20];
 
-	u8 reserved_at_40[0x40];
+	u8 reserved_at_40[0x3f];
+
+	u8 force_state[0x1];
 };
 
 enum {
 	MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
-	MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
+	MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
};
 
 struct mlx5_ifc_teardown_hca_in_bits {

@@ -569,8 +569,6 @@ int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
 int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
 			      int reset, void *out, int out_size);
-int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
-				  u32 *out_of_buffer);
 
 static inline const char *mlx5_qp_type_str(int type)
 {