mlx5-fixes-2020-04-29

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl6q+tEACgkQSD+KveBX
 +j7PKAgAz5g/voNLiUn2Pz54a3qpYDy4OjpOIpyJeJ5mf0z9fZPRUO1Cx8Y0HY1f
 dpySyZZ93kfl5DfWnMxST8Mk+2p8V0CD7EK1b0YrF3kdnhWoO7aAagJGEy6uc8Ez
 wAuAK3VHs0Ufj1+YFpY9uYyvrAmkwY0WGZbfd1bKxS6D7ttp3OZ95k5o2MlJcpa+
 36IDd3DH8V3jGLU83dtLISw8hLf/0tgCX5CrcelJpls5ge3ZlOTpevcLaoUg1UY4
 CXC1igw1eBe658dC0TR64LPo25GVQCks8Oui4D1AbS3hhvKKePPTqNwyudl6BWP2
 DXJWL6sJVCaX7Y8/7ttmNjDoO5aBxw==
 =kuyA
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2020-04-29' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2020-04-29

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

v2:
 - Dropped the ktls patch; Tariq needs to check whether it is fixable in the stack

For -stable v4.12
 ('net/mlx5: Fix forced completion access non initialized command entry')
 ('net/mlx5: Fix command entry leak in Internal Error State')

For -stable v5.4
 ('net/mlx5: DR, On creation set CQ's arm_db member to right value')

For -stable v5.6
 ('net/mlx5e: Fix q counters on uplink representors')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 81d6bc44fa (committed by David S. Miller, 2020-04-30 12:58:11 -07:00)
4 changed files with 29 additions and 18 deletions


@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
}
cmd->ent_arr[ent->idx] = ent;
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
/* Skip sending command to fw if internal error */
if (pci_channel_offline(dev->pdev) ||
@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
/* no doorbell, no need to keep the entry */
free_ent(cmd, ent->idx);
if (ent->callback)
free_cmd(ent);
return;
}
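The three hunks above all touch the command-interface work handler, cmd_work_handler(), and correspond to the two command fixes named in the cover letter: the MLX5_CMD_ENT_STATE_PENDING_COMP bit is now set only after the entry has been fully initialized, so a forced completion triggered by an internal error can no longer see a half-built entry, and the internal-error bail-out now frees the entry itself, since no doorbell is rung and no hardware completion will ever release it. A condensed, non-verbatim sketch of the resulting flow (elided parts marked with ...):

        cmd->ent_arr[ent->idx] = ent;
        lay = get_inst(cmd, ent->idx);          /* entry is initialized first */
        ent->lay = lay;
        ...
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
        /* only now is the entry published for (forced) completion handling */
        set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

        /* Skip sending command to fw if internal error */
        if (pci_channel_offline(dev->pdev) || ...) {
                ...
                mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
                /* no doorbell, no need to keep the entry */
                free_ent(cmd, ent->idx);
                if (ent->callback)
                        free_cmd(ent);
                return;
        }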


@@ -1773,19 +1773,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
int err = mlx5e_init_rep_rx(priv);
if (err)
return err;
mlx5e_create_q_counters(priv);
return 0;
return mlx5e_init_rep_rx(priv);
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
mlx5e_destroy_q_counters(priv);
mlx5e_cleanup_rep_rx(priv);
mlx5e_destroy_q_counters(priv);
}
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
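The hunks above reorder the q-counter lifetime in the uplink representor profile: the counters are created before mlx5e_init_rep_rx(), which needs them when it sets up the RX path, and destroyed only after mlx5e_cleanup_rep_rx(). Applying the change as shown, the two callbacks presumably end up roughly as follows (a sketch, not verbatim):

static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
        /* the counters must exist before the RX objects that reference them */
        mlx5e_create_q_counters(priv);
        return mlx5e_init_rep_rx(priv);
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
        /* tear down in the reverse order of creation */
        mlx5e_cleanup_rep_rx(priv);
        mlx5e_destroy_q_counters(priv);
}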


@@ -1550,9 +1550,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
MLX5_FLOW_NAMESPACE_KERNEL, 1,
modact);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
esw_warn(dev, "Failed to create restore mod header, err: %d\n",
err);
err = PTR_ERR(mod_hdr);
goto err_mod_hdr;
}
@@ -2219,10 +2219,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
return err;
goto create_acl_err;
err = esw_create_offloads_table(esw, total_vports);
if (err)
@@ -2240,9 +2242,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_fg_err;
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
return 0;
create_fg_err:
@@ -2253,18 +2252,19 @@ create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
@@ -2377,9 +2377,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err_vports:
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
esw_set_passing_vport_metadata(esw, false);
err_steering_init:
esw_offloads_steering_cleanup(esw);
err_steering_init:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
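The eswitch offloads hunks above make three related corrections: err is assigned from PTR_ERR() before it is printed in the restore-table warning, the vports lock and hash table are initialized before the first setup step and destroyed last (with the new create_acl_err label unwinding them on failure), and the error labels in esw_offloads_enable() are reordered so the unwind mirrors the setup order. A condensed, non-verbatim sketch of the init/unwind ordering this enforces, with the intermediate steps elided:

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
        int err;

        memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
        mutex_init(&esw->fdb_table.offloads.vports.lock);
        hash_init(esw->fdb_table.offloads.vports.table);

        err = esw_create_uplink_offloads_acl_tables(esw);
        if (err)
                goto create_acl_err;

        /* ... offloads table, restore table, FDB tables, vport RX group ... */

        return 0;

        /* ... failures in the later steps unwind the earlier ones first ... */
create_offloads_err:
        esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
        mutex_destroy(&esw->fdb_table.offloads.vports.lock);
        return err;
}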


@@ -695,6 +695,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
}
static void dr_cq_complete(struct mlx5_core_cq *mcq,
struct mlx5_eqe *eqe)
{
pr_err("CQ completion CQ: #%u\n", mcq->cqn);
}
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -756,6 +762,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
cq->mcq.event = dr_cq_event;
cq->mcq.comp = dr_cq_complete;
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
@@ -767,7 +774,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
/* Set a non-zero value in order to avoid having the HW run db-recovery
 * on a CQ that is used in polling mode.
 */
*cq->mcq.arm_db = cpu_to_be32(2 << 28);
cq->mcq.vector = 0;
cq->mcq.irqn = irqn;
cq->mcq.uar = uar;
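The final hunks adjust the SW steering CQ created by dr_create_cq(): a completion callback is registered (dr_cq_complete(), which only logs, because this CQ is polled and should never generate completion events), and arm_db is initialized to a non-zero value so the hardware's doorbell-recovery logic does not treat the never-armed, polling-mode CQ as stuck. The value 2 << 28 appears to mimic the arm sequence-number field that mlx5_cq_arm() would normally write into the arm doorbell record; roughly (a sketch, field layout per mlx5_cq_arm()):

        u32 sn = 2;     /* 2-bit arm sequence number, bits 29:28 of the record */
        u32 ci = 0;     /* 24-bit consumer index, bits 23:0 */

        *cq->mcq.arm_db = cpu_to_be32(sn << 28 | ci);   /* equals 2 << 28 */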