mlx5-fixes-2021-07-27
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmEAkk4ACgkQSD+KveBX
+j6hFgf/eIRpqg3mivOXjwo3LF34cP02OBStbb5S5S2MFX60n8Z1GXew42I01iON
zUv6RRmtgJiWHedWUYaXnPHE3hqXlX7Vr2fuBXlNJdPG87jZBBK5FLfQCSPBmcyQ
ZK778uSlgarBL3kGwPkPLLwMypMgHpTCkvCb9RIOD2HzXPCjftP4IBQwEEV2zyaj
d7mtbndGeOyRyEh2RO7ABdJNIAta3Z6EfZCe61mYhVg6c49fkoZlX4/vdX5FhtZM
Pvode2ReD49vigpvfDpSSFea6W+qWtTz83/J9Dbnt8byLdzY5VKm3hanv7o41ksg
FyLHz08HPJjP6Mwikel/ZyG5Bb5FFw==
=lD83
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-07-27' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-07-27

This series introduces some fixes to the mlx5 driver.
Please pull and let me know if there is any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9d0279d043

@@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
 	return 1;
 }
 
-/* This function is called with two flows:
- * 1. During initialization of mlx5_core_dev and we don't need to lock it.
- * 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
- */
+/* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
 	struct auxiliary_device *adev;

@@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
 }
 
+static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+		MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+
+	return ro && params->lro_en ?
+		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+}
+
 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
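
This hunk and the next one pair up: the new rq_end_pad_mode() helper is introduced here and then used below in place of the hardcoded ALIGN value, so end padding is dropped only when relaxed-ordering PCIe writes are actually usable together with LRO, the combination the patch treats as corruption-prone. A rough standalone model of that decision in plain C, with the PCIe and firmware capability queries stubbed out as booleans (none of this is reusable mlx5 API):

    #include <stdbool.h>
    #include <stdio.h>

    enum end_pad_mode { END_PAD_NONE, END_PAD_ALIGN };

    /* Stand-ins for pcie_relaxed_ordering_enabled(pdev) and the
     * MLX5_CAP_GEN(mdev, relaxed_ordering_write) firmware capability. */
    struct caps {
        bool pcie_relaxed_ordering;
        bool relaxed_ordering_write;
    };

    /* Same decision as the kernel helper: padding is dropped only when
     * relaxed-ordering writes are usable AND LRO is enabled; every other
     * combination keeps the ALIGN end-padding mode. */
    static enum end_pad_mode rq_end_pad_mode(const struct caps *c, bool lro_en)
    {
        bool ro = c->pcie_relaxed_ordering && c->relaxed_ordering_write;

        return ro && lro_en ? END_PAD_NONE : END_PAD_ALIGN;
    }

    int main(void)
    {
        const struct caps c = { true, true };

        printf("ro + lro   -> %s\n", rq_end_pad_mode(&c, true) == END_PAD_NONE ? "NONE" : "ALIGN");
        printf("ro, no lro -> %s\n", rq_end_pad_mode(&c, false) == END_PAD_NONE ? "NONE" : "ALIGN");
        return 0;
    }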

@@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 	}
 
 	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
-	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
 	MLX5_SET(wq, wq, log_wq_stride,
 		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
 	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

@@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
 		params->log_sq_size = orig->log_sq_size;
 		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
 	}
-	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+	/* RQ */
+	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+		params->vlan_strip_disable = orig->vlan_strip_disable;
 		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+	}
 }
 
 static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,

@@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
 	int err;
 
 	rq->wq_type = params->rq_wq_type;
-	rq->pdev = mdev->device;
+	rq->pdev = c->pdev;
 	rq->netdev = priv->netdev;
 	rq->priv = priv;
 	rq->clock = &mdev->clock;

@@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
 	struct mlx5e_priv *priv = t->priv;
 
 	rq->wq_type = params->rq_wq_type;
-	rq->pdev = mdev->device;
+	rq->pdev = t->pdev;
 	rq->netdev = priv->netdev;
 	rq->priv = priv;
 	rq->clock = &mdev->clock;

@@ -3384,7 +3384,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
 
 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 {
-	int err = 0;
+	int err;
 	int i;
 
 	for (i = 0; i < chs->num; i++) {

@@ -3392,6 +3392,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 		if (err)
 			return err;
 	}
+	if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+		return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
 
 	return 0;
 }

@@ -3829,6 +3831,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 	return 0;
 }
 
+static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+						       netdev_features_t features)
+{
+	features &= ~NETIF_F_HW_TLS_RX;
+	if (netdev->features & NETIF_F_HW_TLS_RX)
+		netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+	features &= ~NETIF_F_HW_TLS_TX;
+	if (netdev->features & NETIF_F_HW_TLS_TX)
+		netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+
+	features &= ~NETIF_F_NTUPLE;
+	if (netdev->features & NETIF_F_NTUPLE)
+		netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
+	return features;
+}
+
 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 					    netdev_features_t features)
 {
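
The helper above consolidates the uplink-representor masking that the next hunk removes from mlx5e_fix_features(), and extends it to NETIF_F_NTUPLE. Note the shape of each block: the bit is always cleared from the candidate feature set, but the warning fires only if the feature is currently active on the netdev, so repeated renegotiations stay quiet. A minimal userspace rendering of that pattern (the feature bits here are illustrative constants, not the kernel's netdev_features_t):

    #include <stdio.h>

    #define F_HW_TLS_RX (1u << 0)
    #define F_HW_TLS_TX (1u << 1)
    #define F_NTUPLE    (1u << 2)

    /* active: bits currently enabled on the device
     * wanted: bits the stack is proposing to enable */
    static unsigned int fix_uplink_rep_features(unsigned int active, unsigned int wanted)
    {
        wanted &= ~F_HW_TLS_RX;
        if (active & F_HW_TLS_RX)
            fprintf(stderr, "Disabling hw_tls_rx, not supported in switchdev mode\n");

        wanted &= ~F_HW_TLS_TX;
        if (active & F_HW_TLS_TX)
            fprintf(stderr, "Disabling hw_tls_tx, not supported in switchdev mode\n");

        wanted &= ~F_NTUPLE;
        if (active & F_NTUPLE)
            fprintf(stderr, "Disabling ntuple, not supported in switchdev mode\n");

        return wanted;
    }

    int main(void)
    {
        /* ntuple was on; all three are requested again: all get stripped,
         * but only the previously-active ntuple produces a warning. */
        unsigned int fixed = fix_uplink_rep_features(F_NTUPLE,
                                                     F_HW_TLS_RX | F_HW_TLS_TX | F_NTUPLE);

        printf("fixed features: 0x%x\n", fixed); /* prints 0x0 */
        return 0;
    }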

@@ -3860,15 +3880,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 			netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
 	}
 
-	if (mlx5e_is_uplink_rep(priv)) {
-		features &= ~NETIF_F_HW_TLS_RX;
-		if (netdev->features & NETIF_F_HW_TLS_RX)
-			netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
-
-		features &= ~NETIF_F_HW_TLS_TX;
-		if (netdev->features & NETIF_F_HW_TLS_TX)
-			netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
-	}
+	if (mlx5e_is_uplink_rep(priv))
+		features = mlx5e_fix_uplink_rep_features(netdev, features);
 
 	mutex_unlock(&priv->state_lock);
 

@@ -4859,6 +4872,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	if (MLX5_CAP_ETH(mdev, scatter_fcs))
 		netdev->hw_features |= NETIF_F_RXFCS;
 
+	if (mlx5_qos_is_supported(mdev))
+		netdev->hw_features |= NETIF_F_HW_TC;
+
 	netdev->features = netdev->hw_features;
 
 	/* Defaults */

@@ -4879,8 +4895,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 		netdev->hw_features |= NETIF_F_NTUPLE;
 #endif
 	}
-	if (mlx5_qos_is_supported(mdev))
-		netdev->features |= NETIF_F_HW_TC;
 
 	netdev->features |= NETIF_F_HIGHDMA;
 	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
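
The two hunks above move the HTB-offload NETIF_F_HW_TC bit from netdev->features (where it was set after the `netdev->features = netdev->hw_features` copy) into netdev->hw_features before the copy. In netdev core, only bits present in hw_features are user-toggleable; bits outside it are treated as fixed. The sketch below paraphrases that negotiation rule as found in net/core/dev.c's netdev_get_wanted_features() (a simplification for illustration, not a copy of the kernel function):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t features_t;

    /* Bits outside hw_features are fixed and keep their current value;
     * bits inside hw_features follow what the user asked for. */
    static features_t get_wanted_features(features_t features, features_t hw_features,
                                          features_t wanted)
    {
        return (features & ~hw_features) | (wanted & hw_features);
    }

    int main(void)
    {
        const features_t F_HW_TC = 1u << 0;

        /* HW_TC only in features: a user request to clear it has no effect. */
        features_t eff = get_wanted_features(F_HW_TC, 0, 0);
        printf("not in hw_features: 0x%llx after 'ethtool off'\n",
               (unsigned long long)eff);

        /* HW_TC in hw_features: the request takes effect. */
        eff = get_wanted_features(F_HW_TC, F_HW_TC, 0);
        printf("in hw_features:     0x%llx after 'ethtool off'\n",
               (unsigned long long)eff);
        return 0;
    }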

@@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 static
 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
 {
+	struct mlx5_core_dev *mdev;
 	struct net_device *netdev;
 	struct mlx5e_priv *priv;
 
-	netdev = __dev_get_by_index(net, ifindex);
+	netdev = dev_get_by_index(net, ifindex);
+	if (!netdev)
+		return ERR_PTR(-ENODEV);
+
 	priv = netdev_priv(netdev);
-	return priv->mdev;
+	mdev = priv->mdev;
+	dev_put(netdev);
+
+	/* Mirred tc action holds a refcount on the ifindex net_device (see
+	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+	 * after dev_put(netdev), while we're in the context of adding a tc flow.
+	 *
+	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+	 * stored in a hairpin object, which exists until all flows, that refer to it, get
+	 * removed.
+	 *
+	 * On the other hand, after a hairpin object has been created, the peer net_device may
+	 * be removed/unbound while there are still some hairpin flows that are using it. This
+	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+	 * NETDEV_UNREGISTER event of the peer net_device.
+	 */
+	return mdev;
 }
 
 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
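
Two distinct problems are closed in that hunk: __dev_get_by_index() can return NULL and the old code dereferenced the result unconditionally (the nullptr in the fix title), and it is the lockless variant that takes no reference, whereas dev_get_by_index() pins the net_device until the matching dev_put(). The long comment then justifies why the extracted mdev stays usable after the reference is dropped. The lookup-hold-copy-release shape, reduced to a self-contained toy registry (all names here are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct node {
        int ifindex;
        int refcnt;
        const char *mdev; /* the only field the caller actually needs */
    };

    static struct node registry[] = {
        { .ifindex = 3, .refcnt = 1, .mdev = "mlx5_core.0" },
    };

    /* dev_get_by_index()-like: return the entry with a reference held, or NULL. */
    static struct node *get_by_index(int ifindex)
    {
        for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++) {
            if (registry[i].ifindex == ifindex) {
                registry[i].refcnt++;
                return &registry[i];
            }
        }
        return NULL;
    }

    static void put(struct node *n)
    {
        n->refcnt--; /* dev_put(): release the temporary hold */
    }

    /* Mirrors the fixed lookup: bail out on a missing ifindex, copy what we
     * need while the reference is held, then release it. */
    static const char *hairpin_get_mdev(int ifindex)
    {
        struct node *n = get_by_index(ifindex);
        const char *mdev;

        if (!n)
            return NULL; /* the kernel code returns ERR_PTR(-ENODEV) */

        mdev = n->mdev;
        put(n);
        return mdev;
    }

    int main(void)
    {
        const char *hit = hairpin_get_mdev(3);
        const char *miss = hairpin_get_mdev(9);

        printf("ifindex 3: %s\n", hit ? hit : "(ENODEV)");
        printf("ifindex 9: %s\n", miss ? miss : "(ENODEV)");
        return 0;
    }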

@@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
 
 	func_mdev = priv->mdev;
 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+	if (IS_ERR(peer_mdev)) {
+		err = PTR_ERR(peer_mdev);
+		goto create_pair_err;
+	}
 
 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
 	if (IS_ERR(pair)) {

@@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 	int err;
 
 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+	if (IS_ERR(peer_mdev)) {
+		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+		return PTR_ERR(peer_mdev);
+	}
+
 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
 		return -EOPNOTSUPP;

@@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
 };
 
 struct mlx5_vport_tbl_attr {
-	u16 chain;
+	u32 chain;
 	u16 prio;
 	u16 vport;
 	const struct esw_vport_tbl_namespace *vport_ns;
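
The widening from u16 to u32 matters because tc chain identifiers are 32-bit values; storing one in a u16 silently truncates anything above 65535, so two different chains could map to the same vport table attribute. A two-variable demonstration of the truncation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t chain = 70000;            /* a valid tc chain id above U16_MAX */
        uint16_t stored = (uint16_t)chain; /* silently becomes 70000 - 65536 */

        printf("chain %u stored in a u16 reads back as %u\n", chain, stored);
        return 0;
    }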

@@ -382,10 +382,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
 {
 	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
-	dest[dest_idx].vport.vhca_id =
-		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
-	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+		dest[dest_idx].vport.vhca_id =
+			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
 		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+	}
 	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
 		if (pkt_reformat) {
 			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;

@@ -2367,6 +2368,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 	switch (event) {
 	case ESW_OFFLOADS_DEVCOM_PAIR:
+		if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
+			break;
+
 		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;

@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
 			      struct fs_prio *prio)
 {
-	struct mlx5_flow_table *next_ft;
+	struct mlx5_flow_table *next_ft, *first_ft;
 	int err = 0;
 
 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
 
-	if (list_empty(&prio->node.children)) {
+	first_ft = list_first_entry_or_null(&prio->node.children,
+					    struct mlx5_flow_table, node.list);
+	if (!first_ft || first_ft->level > ft->level) {
 		err = connect_prev_fts(dev, ft, prio);
 		if (err)
 			return err;
 
-		next_ft = find_next_chained_ft(prio);
+		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
 		err = connect_fwd_rules(dev, ft, next_ft);
 		if (err)
 			return err;
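
Together with the next hunk this rewires how a flow table is spliced into and out of a priority's chain. Previously a new table was connected to its predecessors only when the prio was empty, so a table created in front of existing ones would forward misses straight to the next prio, skipping its own prio's tables; the fixed logic makes the new table the entry point whenever its level precedes the current first table, and forwards misses to the table it displaced. A toy model of that splice, with a single next pointer standing in for the driver's forwarding rules (everything here is a simplification under that assumption):

    #include <stdio.h>

    /* Toy flow table: level-ordered, each table forwards misses to ->next. */
    struct ft {
        int level;
        struct ft *next;
    };

    /* Sketch of the fixed connect_flow_table(): the new table becomes the
     * prio's entry point not only when the prio is empty, but whenever its
     * level precedes the current first table; misses then go to the table
     * it displaced (first_ft) rather than to the next prio's chain. */
    static void connect_flow_table(struct ft *new_ft, struct ft **head,
                                   struct ft *next_prio_chain)
    {
        struct ft *first_ft = *head;

        if (!first_ft || first_ft->level > new_ft->level) {
            new_ft->next = first_ft ? first_ft : next_prio_chain;
            *head = new_ft; /* stands in for connect_prev_fts() */
        }
    }

    int main(void)
    {
        struct ft next_prio = { .level = 100, .next = NULL };
        struct ft a = { .level = 5, .next = NULL };
        struct ft b = { .level = 3, .next = NULL };
        struct ft *head = NULL;

        connect_flow_table(&a, &head, &next_prio); /* empty prio: a -> next prio */
        connect_flow_table(&b, &head, &next_prio); /* b slots in front: b -> a */

        printf("entry table level %d forwards to level %d\n",
               head->level, head->next->level);
        return 0;
    }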

@@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
 			       node.list) == ft))
 		return 0;
 
-	next_ft = find_next_chained_ft(prio);
+	next_ft = find_next_ft(ft);
 	err = connect_fwd_rules(dev, next_ft, ft);
 	if (err)
 		return err;

@@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
 	}
 	fw_reporter_ctx.err_synd = health->synd;
 	fw_reporter_ctx.miss_counter = health->miss_counter;
-	devlink_health_report(health->fw_fatal_reporter,
-			      "FW fatal error reported", &fw_reporter_ctx);
+	if (devlink_health_report(health->fw_fatal_reporter,
+				  "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
+		/* If recovery wasn't performed, due to grace period,
+		 * unload the driver. This ensures that the driver
+		 * closes all its resources and it is not subjected to
+		 * requests from the kernel.
+		 */
+		mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+		mlx5_unload_one(dev);
+	}
 }
 
 static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
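
devlink_health_report() returns -ECANCELED when auto-recovery is skipped because the reporter is still within its grace period after a previous recovery. Before this change the return value was ignored, leaving the device stuck in an error state; now that case escalates to a full driver unload. A stripped-down model of the control flow (the grace-period decision is stubbed, and mlx5_unload_one() is represented by a message):

    #include <stdbool.h>
    #include <stdio.h>

    #define ECANCELED_RC (-125) /* numeric value of -ECANCELED on Linux */

    /* Stub for devlink_health_report(): recovery is refused while the
     * reporter is inside the grace period that follows a recovery. */
    static int health_report(bool in_grace_period)
    {
        if (in_grace_period)
            return ECANCELED_RC;
        printf("recovery executed\n");
        return 0;
    }

    /* Mirrors the fixed mlx5_fw_fatal_reporter_err_work(): when recovery
     * was not performed, unload instead of leaving a dead device up. */
    static void fatal_err_work(bool in_grace_period)
    {
        if (health_report(in_grace_period) == ECANCELED_RC) {
            fprintf(stderr, "Driver is in error state. Unloading\n");
            /* mlx5_unload_one(dev) would run here in the kernel */
        }
    }

    int main(void)
    {
        fatal_err_work(false); /* normal path: recovery runs */
        fatal_err_work(true);  /* grace period: escalate to unload */
        return 0;
    }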