mlx5-fixes-2022-06-08

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmKg7PAACgkQSD+KveBX
+j7snAgAqdyRrGVVfTDd7lMqjNJu12KA14LUVvBchtUod5KBGsuwbP2KAC0dRHRo
F6zLwIVfjf3ZICJDdZYYMDUyp3kuaO1iS1tQq7a1N1zo/cepdzDlbnfRikCWQSq8
yM3vvBPiy3UEF4duMZW2eMmkLW89dKsd7MwK5pQ1LitbnGgdR7x6nh5WR6FNFjrD
bvMtH9qiePIIWn//wfz4FKJdCzGJN4URyS/YRH5SnbR0pzpucOUOEhlj1XTXyWG5
sDwugKqYm2JcmMEVvHw+8r8ZWEZght3B1qRzbO4OtHYng3CZ0pCZnQ7la+CWZG/y
XNEzkI+y+8kFlkNPeveok/pE/aBMEQ==
=ICLs
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2022-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-06-08

This series provides bug fixes to mlx5 driver.

* tag 'mlx5-fixes-2022-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: fs, fail conflicting actions
  net/mlx5: Rearm the FW tracer after each tracer event
  net/mlx5: E-Switch, pair only capable devices
  net/mlx5e: CT: Fix cleanup of CT before cleanup of TC ct rules
  Revert "net/mlx5e: Allow relaxed ordering over VFs"
  MAINTAINERS: adjust MELLANOX ETHERNET INNOVA DRIVERS to TLS support removal
====================

Link: https://lore.kernel.org/r/20220608185855.19818-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit bf56a0917f
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -12703,7 +12703,6 @@ L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
 Q:	https://patchwork.kernel.org/project/netdevbpf/list/
-F:	drivers/net/ethernet/mellanox/mlx5/core/accel/*
 F:	drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
 F:	drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:	include/linux/mlx5/mlx5_ifc_fpga.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other)
 	return pci_get_drvdata(to_pci_dev(other));
 }
 
-static int next_phys_dev(struct device *dev, const void *data)
-{
-	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
-
-	mdev = pci_get_other_drvdata(this->device, dev);
-	if (!mdev)
-		return 0;
-
-	return _next_phys_dev(mdev, data);
-}
-
 static int next_phys_dev_lag(struct device *dev, const void *data)
 {
 	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
@@ -623,13 +612,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
 	return pci_get_drvdata(to_pci_dev(next));
 }
 
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
-{
-	lockdep_assert_held(&mlx5_intf_mutex);
-	return mlx5_get_next_dev(dev, &next_phys_dev);
-}
-
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 	if (!tracer->owner)
 		return;
 
+	if (unlikely(!tracer->str_db.loaded))
+		goto arm;
+
 	block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
 	start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
 
@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 						      &tmp_trace_block[TRACES_PER_BLOCK - 1]);
 	}
 
+arm:
 	mlx5_fw_tracer_arm(dev);
 }
 
@@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
 		queue_work(tracer->work_queue, &tracer->ownership_change_work);
 		break;
 	case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
-		if (likely(tracer->str_db.loaded))
-			queue_work(tracer->work_queue, &tracer->handle_traces_work);
+		queue_work(tracer->work_queue, &tracer->handle_traces_work);
 		break;
 	default:
 		mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
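The pattern behind this fix generalizes: an edge-triggered event source must be rearmed on every event, even when the handler cannot process the payload yet, or no further events are ever delivered. A minimal standalone C sketch of that control flow (illustrative only; tracer_state, rearm() and the loaded flag are hypothetical stand-ins for the tracer context, mlx5_fw_tracer_arm() and str_db.loaded):

/* Minimal sketch, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct tracer_state {
	bool loaded; /* strings database ready for parsing? */
	bool armed;  /* will the device raise another event? */
};

static void rearm(struct tracer_state *t)
{
	t->armed = true;
}

static void handle_traces(struct tracer_state *t)
{
	t->armed = false; /* the event consumed the previous arm */

	if (!t->loaded)
		goto arm; /* cannot parse yet, but must still rearm */

	puts("parse trace blocks");
arm:
	rearm(t); /* skipping this would silence all future events */
}

int main(void)
{
	struct tracer_state t = { .loaded = false };

	handle_traces(&t); /* early event: nothing parsed, still rearmed */
	t.loaded = true;
	handle_traces(&t); /* later events keep flowing */
	return 0;
}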
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
 	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
-	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+		  MLX5_CAP_GEN(mdev, relaxed_ordering_write);
 
 	return ro && lro_en ?
 		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -38,11 +38,12 @@
 
 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
 {
+	bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
 	bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
 	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
 
-	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
-	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
+	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
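The revert restores a conjunctive gate: relaxed ordering is requested on an mkey only when the PCI function actually has RO enabled and the device capability bit is also set. A standalone sketch of that gating, with plain booleans standing in for pcie_relaxed_ordering_enabled() and MLX5_CAP_GEN() (names here are illustrative, not the driver's):

/* Sketch only: the real driver reads the RO bit from PCI config space
 * and the capability bits from the device. */
#include <stdbool.h>

struct ro_caps {
	bool pci_ro_enabled; /* RO not disabled at the PCI level */
	bool cap_ro_read;    /* device supports relaxed-ordering reads */
	bool cap_ro_write;   /* device supports relaxed-ordering writes */
};

/* RO reads/writes may be enabled only when both conditions hold. */
static bool ro_read_allowed(const struct ro_caps *c)
{
	return c->pci_ro_enabled && c->cap_ro_read;
}

static bool ro_write_allowed(const struct ro_caps *c)
{
	return c->pci_ro_enabled && c->cap_ro_write;
}

int main(void)
{
	struct ro_caps c = { .pci_ro_enabled = true, .cap_ro_read = true };

	return ro_read_allowed(&c) && !ro_write_allowed(&c) ? 0 : 1;
}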
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -950,6 +950,13 @@ err_event_reg:
 	return err;
 }
 
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+	mlx5e_rep_bond_cleanup(rpriv);
+	mlx5e_rep_tc_cleanup(rpriv);
+}
+
 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 		return err;
 	}
 
-	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
-	if (err)
-		goto err_ht_init;
-
 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
 		err = mlx5e_init_uplink_rep_tx(rpriv);
 		if (err)
 			goto err_init_tx;
 	}
 
+	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+	if (err)
+		goto err_ht_init;
+
 	return 0;
 
-err_init_tx:
-	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
 err_ht_init:
+	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+		mlx5e_cleanup_uplink_rep_tx(rpriv);
+err_init_tx:
 	mlx5e_destroy_tises(priv);
 	return err;
 }
 
-static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
-{
-	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
-	mlx5e_rep_bond_cleanup(rpriv);
-	mlx5e_rep_tc_cleanup(rpriv);
-}
-
 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 
+	mlx5e_destroy_tises(priv);
+	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+
 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
 		mlx5e_cleanup_uplink_rep_tx(rpriv);
-
-	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
-	mlx5e_destroy_tises(priv);
 }
 
 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
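Besides reordering the teardown so the CT context outlives the TC ct rules that reference it, this hunk shows the kernel's goto-unwind idiom: each error label releases exactly the steps that succeeded before the failure, in reverse order, and the cleanup helper is moved above its first user. A compact sketch of that idiom under assumed names (init_a/init_b are hypothetical stand-ins, not the driver's functions):

/* Sketch of the unwind pattern, not driver code. */
#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; /* simulate failure */ }
static void cleanup_a(void) { puts("cleanup a"); }

static int init_all(void)
{
	int err;

	err = init_a();
	if (err)
		return err;

	err = init_b();
	if (err)
		goto err_b; /* only step a succeeded so far */

	return 0;

err_b:
	cleanup_a(); /* unwind in reverse order of init */
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}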
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 	switch (event) {
 	case ESW_OFFLOADS_DEVCOM_PAIR:
-		if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
-			break;
-
 		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
@@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
+	if (!mlx5_is_lag_supported(esw->dev))
+		return;
+
 	mlx5_devcom_register_component(devcom,
 				       MLX5_DEVCOM_ESW_OFFLOADS,
 				       mlx5_esw_offloads_devcom_event,
@@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
+	if (!mlx5_is_lag_supported(esw->dev))
+		return;
+
 	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
 			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
 
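Note the symmetry: the same mlx5_is_lag_supported() guard is added to both init and cleanup, so devices that never registered the devcom component never attempt to unregister it. A small sketch of why conditional registration requires a mirrored guard at teardown (lag_supported and the devcom_* helpers are hypothetical models, not the real devcom API):

/* Sketch only. */
#include <stdbool.h>
#include <stdio.h>

static bool lag_supported; /* models mlx5_is_lag_supported() */
static bool registered;

static void devcom_init(void)
{
	if (!lag_supported)
		return; /* pair only capable devices */
	registered = true;
	puts("devcom component registered");
}

static void devcom_cleanup(void)
{
	if (!lag_supported)
		return; /* mirror the guard in init */
	if (registered) {
		puts("devcom component unregistered");
		registered = false;
	}
}

int main(void)
{
	lag_supported = false;
	devcom_init();    /* skipped: device cannot be paired */
	devcom_cleanup(); /* skipped symmetrically */
	return 0;
}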
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
 	return NULL;
 }
 
-static bool check_conflicting_actions(u32 action1, u32 action2)
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+					   const struct mlx5_fs_vlan *vlan1)
 {
-	u32 xored_actions = action1 ^ action2;
+	return vlan0->ethtype != vlan1->ethtype ||
+	       vlan0->vid != vlan1->vid ||
+	       vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+				      const struct mlx5_flow_act *act2)
+{
+	u32 action1 = act1->action;
+	u32 action2 = act2->action;
+	u32 xored_actions;
+
+	xored_actions = action1 ^ action2;
 
 	/* if one rule only wants to count, it's ok */
 	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
 		return true;
 
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+	    act1->pkt_reformat != act2->pkt_reformat)
+		return true;
+
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+	    act1->modify_hdr != act2->modify_hdr)
+		return true;
+
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+	    check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+		return true;
+
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+	    check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+		return true;
+
 	return false;
 }
 
@@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
 				  const struct mlx5_flow_context *flow_context,
 				  const struct mlx5_flow_act *flow_act)
 {
-	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
+	if (check_conflicting_actions(flow_act, &fte->action)) {
 		mlx5_core_warn(get_dev(&fte->node),
 			       "Found two FTEs with conflicting actions\n");
 		return -EEXIST;
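The original check only XORed the two action bitmasks; the fix additionally compares the action payloads (reformat and modify-header pointers, VLAN fields) so that two rules with the same action bits but different arguments are treated as conflicting. A standalone sketch of just the XOR part of the test, with invented flag values rather than the mlx5 definitions:

/* Sketch of the XOR idea: xored has a bit set for every action flag
 * the two rules disagree on; a conflict exists if any disputed bit is
 * destructive. Flag values are made up for the example. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACT_FWD   (1u << 0)
#define ACT_DROP  (1u << 1)
#define ACT_COUNT (1u << 2)

static bool conflicting(uint32_t a1, uint32_t a2)
{
	uint32_t xored = a1 ^ a2;

	/* a rule that only counts can share an FTE with anything */
	if (a1 == ACT_COUNT || a2 == ACT_COUNT)
		return false;

	return xored & (ACT_FWD | ACT_DROP);
}

int main(void)
{
	printf("%d\n", conflicting(ACT_FWD | ACT_COUNT, ACT_FWD)); /* 0 */
	printf("%d\n", conflicting(ACT_FWD, ACT_DROP));            /* 1 */
	return 0;
}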
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -74,6 +74,16 @@ struct mlx5_lag {
 	struct lag_mpesw	lag_mpesw;
 };
 
+static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
+{
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+		return false;
+	return true;
+}
+
 static inline struct mlx5_lag *
 mlx5_lag_dev(struct mlx5_core_dev *dev)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev);
 void mlx5_detach_device(struct mlx5_core_dev *dev);
 int mlx5_register_device(struct mlx5_core_dev *dev);
 void mlx5_unregister_device(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
 struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
 void mlx5_dev_list_lock(void);
 void mlx5_dev_list_unlock(void);