mlx5-fixes-2020-06-11
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl7islIACgkQSD+KveBX
+j4Sogf6A/aXl6ABXZZyrzmOmNIDDFAONy0anOM+aUkZd39STEK++GnhVSnaijCF
iSLg6wv2hP9h9WkovuuJ49Fwz7C4XYpSXf/yjNKNzRmuZby0KFeKw2+hMAY4Abm0
VtKtHLE4hwp7CHsJRc+t/x3ZFuCWIe3tUTjyClfke9o0z2WjJMls03h69jBe+N6Q
PJshvOFSCYWSJp3mdFYqG1ZJbNFl7kt+mh8XuF3XKkh4FZjh7LP067jKtotRPhB1
NonYOz09s3TIQZUxcfUG5o44i/Ziyw5pkdNHsQ2WAyFguXOilcAP9OfLVpCdRaln
GphFCcUd5LzgGx6pXGUewmFgmrSALw==
=k0WF
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2020-06-11

This series introduces some fixes to the mlx5 driver. For more
information, please see the tag log below. Please pull, and let me
know if there is any problem.

For -stable v5.2
 ('net/mlx5: drain health workqueue in case of driver load error')

For -stable v5.3
 ('net/mlx5e: Fix repeated XSK usage on one channel')
 ('net/mlx5: Fix fatal error handling during device load')

For -stable v5.5
 ('net/mlx5: Disable reload while removing the device')

For -stable v5.7
 ('net/mlx5e: CT: Fix ipv6 nat header rewrite actions')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 07007dbee4
@@ -283,7 +283,6 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
 		goto params_reg_err;
 	mlx5_devlink_set_params_init_values(devlink);
 	devlink_params_publish(devlink);
-	devlink_reload_enable(devlink);
 	return 0;

 params_reg_err:
@@ -293,7 +292,6 @@ params_reg_err:

 void mlx5_devlink_unregister(struct devlink *devlink)
 {
-	devlink_reload_disable(devlink);
 	devlink_params_unregister(devlink, mlx5_devlink_params,
 				  ARRAY_SIZE(mlx5_devlink_params));
 	devlink_unregister(devlink);
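The two devlink_reload_* calls deleted here are not dropped from the driver; they reappear in the init_one()/remove_one() hunks further down, so reload stays disabled across the whole PCI probe/remove window. A rough sketch of the resulting ordering (simplified; locals and error handling elided, not the full driver code):

static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... mlx5_mdev_init(), mlx5_pci_init(), mlx5_load_one() ... */
	devlink_reload_enable(devlink);		/* last step of a successful probe */
	return 0;
}

static void remove_one(struct pci_dev *pdev)
{
	devlink_reload_disable(devlink);	/* first step of remove */
	/* ... mlx5_crdump_disable(), mlx5_unload_one(), mlx5_pci_close() ... */
}

With this ordering a devlink reload can never run concurrently with device removal, which is the race the 'Disable reload while removing the device' patch closes.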
@@ -328,21 +328,21 @@ mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
 	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
 		MLX5_SET(set_action_in, modact, length, 0);
-		if (offset == offsetof(struct ipv6hdr, saddr))
+		if (offset == offsetof(struct ipv6hdr, saddr) + 12)
 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0;
-		else if (offset == offsetof(struct ipv6hdr, saddr) + 4)
+		else if (offset == offsetof(struct ipv6hdr, saddr) + 8)
 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32;
-		else if (offset == offsetof(struct ipv6hdr, saddr) + 8)
+		else if (offset == offsetof(struct ipv6hdr, saddr) + 4)
 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64;
-		else if (offset == offsetof(struct ipv6hdr, saddr) + 12)
+		else if (offset == offsetof(struct ipv6hdr, saddr))
 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96;
-		else if (offset == offsetof(struct ipv6hdr, daddr))
+		else if (offset == offsetof(struct ipv6hdr, daddr) + 12)
 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0;
-		else if (offset == offsetof(struct ipv6hdr, daddr) + 4)
+		else if (offset == offsetof(struct ipv6hdr, daddr) + 8)
 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32;
-		else if (offset == offsetof(struct ipv6hdr, daddr) + 8)
+		else if (offset == offsetof(struct ipv6hdr, daddr) + 4)
 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64;
-		else if (offset == offsetof(struct ipv6hdr, daddr) + 12)
+		else if (offset == offsetof(struct ipv6hdr, daddr))
 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96;
 		else
 			return -EOPNOTSUPP;
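Why the offsets had to be flipped rather than the field names: struct ipv6hdr holds the address in network byte order, so the 4-byte word at the highest offset (saddr + 12) contains the numerically lowest 32 bits of the address, which is what the hardware's *_31_0 field designates. A standalone userspace sketch of the corrected mapping (illustration only, not driver code):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* byte offset within the 16-byte address -> hardware field */
	static const struct { size_t off; const char *field; } map[] = {
		{ 12, "SIPV6_31_0"   },	/* last word  = lowest 32 bits  */
		{  8, "SIPV6_63_32"  },
		{  4, "SIPV6_95_64"  },
		{  0, "SIPV6_127_96" },	/* first word = highest 32 bits */
	};

	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		printf("saddr + %2zu -> MLX5_ACTION_IN_FIELD_OUT_%s\n",
		       map[i].off, map[i].field);
	return 0;
}

The destination address follows the same rule, which is why all eight branches swap symmetrically.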
@@ -152,6 +152,10 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
 	mlx5e_close_cq(&c->xskicosq.cq);
 	mlx5e_close_xdpsq(&c->xsksq);
 	mlx5e_close_cq(&c->xsksq.cq);
+
+	memset(&c->xskrq, 0, sizeof(c->xskrq));
+	memset(&c->xsksq, 0, sizeof(c->xsksq));
+	memset(&c->xskicosq, 0, sizeof(c->xskicosq));
 }

 void mlx5e_activate_xsk(struct mlx5e_channel *c)
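The reasoning behind the added memsets: opening the XSK queues initializes these structs assuming they start out zeroed, but on a second XSK bind to the same channel they still carry state from the previous run — the 'repeated XSK usage on one channel' bug. Clearing them on close restores the invariant. The general shape of the pattern, with hypothetical names:

#include <string.h>

struct queue { int hw_id; unsigned int flags; };
struct channel { struct queue xsk_queue; };

static void queue_release(struct queue *q)
{
	(void)q;	/* ... release hardware resources held by *q ... */
}

static void channel_close_xsk(struct channel *c)
{
	queue_release(&c->xsk_queue);
	/* Reset to freshly-allocated state so a later open on the same
	 * channel does not trip over stale hw_id/flags.
	 */
	memset(&c->xsk_queue, 0, sizeof(c->xsk_queue));
}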
@@ -1173,7 +1173,8 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_rss_params *rss = &priv->rss_params;
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
-	bool hash_changed = false;
+	bool refresh_tirs = false;
+	bool refresh_rqt = false;
 	void *in;

 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1189,36 +1190,38 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,

 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) {
 		rss->hfunc = hfunc;
-		hash_changed = true;
+		refresh_rqt = true;
+		refresh_tirs = true;
 	}

 	if (indir) {
 		memcpy(rss->indirection_rqt, indir,
 		       sizeof(rss->indirection_rqt));
-
-		if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-			u32 rqtn = priv->indir_rqt.rqtn;
-			struct mlx5e_redirect_rqt_param rrp = {
-				.is_rss = true,
-				{
-					.rss = {
-						.hfunc = rss->hfunc,
-						.channels = &priv->channels,
-					},
-				},
-			};
-
-			mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
-		}
+		refresh_rqt = true;
 	}

 	if (key) {
 		memcpy(rss->toeplitz_hash_key, key,
 		       sizeof(rss->toeplitz_hash_key));
-		hash_changed = hash_changed || rss->hfunc == ETH_RSS_HASH_TOP;
+		refresh_tirs = refresh_tirs || rss->hfunc == ETH_RSS_HASH_TOP;
 	}

-	if (hash_changed)
+	if (refresh_rqt && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		struct mlx5e_redirect_rqt_param rrp = {
+			.is_rss = true,
+			{
+				.rss = {
+					.hfunc = rss->hfunc,
+					.channels = &priv->channels,
+				},
+			},
+		};
+		u32 rqtn = priv->indir_rqt.rqtn;
+
+		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+	}
+
+	if (refresh_tirs)
 		mlx5e_modify_tirs_hash(priv, in);

 	mutex_unlock(&priv->state_lock);
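mlx5e_set_rxfh() is the handler behind `ethtool -X` (indirection table, hash key, and hash function updates). The rewrite splits the single hash_changed flag in two because the two hardware objects refresh under different conditions: the RQT must be re-pointed when the table or the hash function changes, while the TIRs need a refresh when the hash function changes, or when the key changes while the function is Toeplitz. A compact standalone rendering of that decision logic (my restatement, not the driver code):

#include <stdbool.h>

enum hfunc { HFUNC_NO_CHANGE, HFUNC_XOR, HFUNC_TOP /* Toeplitz */ };
struct refresh { bool rqt, tirs; };

static struct refresh decide(enum hfunc hfunc, enum hfunc *cur,
			     bool indir_given, bool key_given)
{
	struct refresh r = { false, false };

	if (hfunc != HFUNC_NO_CHANGE && hfunc != *cur) {
		*cur = hfunc;		/* hash function changed */
		r.rqt = r.tirs = true;
	}
	if (indir_given)
		r.rqt = true;		/* table changed: re-point the RQT */
	if (key_given && *cur == HFUNC_TOP)
		r.tirs = true;		/* only Toeplitz consumes the key */
	return r;
}

Under the old code the RQT redirect ran only inside the indir branch, so a hash-function change updated the TIRs but never re-pointed the RQT, even though the redirect parameters (rrp.rss.hfunc) depend on the hash function.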
@@ -162,10 +162,12 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,

 	if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
 		counter = mlx5_fc_create(esw->dev, false);
-		if (IS_ERR(counter))
+		if (IS_ERR(counter)) {
 			esw_warn(esw->dev,
 				 "vport[%d] configure ingress drop rule counter failed\n",
 				 vport->vport);
+			counter = NULL;
+		}
 		vport->ingress.legacy.drop_counter = counter;
 	}
@@ -272,7 +274,7 @@ void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
 	esw_acl_ingress_table_destroy(vport);

 clean_drop_counter:
-	if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter)) {
+	if (vport->ingress.legacy.drop_counter) {
 		mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
 		vport->ingress.legacy.drop_counter = NULL;
 	}
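mlx5_fc_create() returns an ERR_PTR() on failure, never NULL. The setup hunk now normalizes that error to NULL on the spot (the drop counter is optional, so vport setup continues without it), which is exactly what lets the cleanup hunk relax IS_ERR_OR_NULL() to a plain NULL test: an error pointer can no longer reach the drop_counter field. A self-contained userspace sketch of the convention (the macros mimic the kernel's; all names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)

struct counter { long packets; };

static struct counter *counter_create(int simulate_failure)
{
	if (simulate_failure)
		return ERR_PTR(-ENOMEM);
	return calloc(1, sizeof(struct counter));
}

int main(void)
{
	struct counter *c = counter_create(1);

	if (IS_ERR(c)) {
		fprintf(stderr, "counter create failed: %ld\n", PTR_ERR(c));
		c = NULL;	/* normalize: the counter is optional */
	}

	/* ... much later, cleanup needs only the NULL test ... */
	if (c) {
		free(c);
		c = NULL;
	}
	return 0;
}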
@@ -192,15 +192,23 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)

 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 {
+	bool err_detected = false;
+
+	/* Mark the device as fatal in order to abort FW commands */
+	if ((check_fatal_sensors(dev) || force) &&
+	    dev->state == MLX5_DEVICE_STATE_UP) {
+		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+		err_detected = true;
+	}
 	mutex_lock(&dev->intf_state_mutex);
-	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
-		goto unlock;
+	if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+		goto unlock; /* a previous error is still being handled */
+	if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
+		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+		goto unlock;
+	}

-	if (check_fatal_sensors(dev) || force) {
+	if (check_fatal_sensors(dev) || force) { /* protected state setting */
 		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
 		mlx5_cmd_flush(dev);
 	}
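The restructuring follows a two-phase pattern: mark the device fatal before taking intf_state_mutex — another flow (e.g. device load) may hold that mutex while stuck issuing firmware commands, and per the added comment the state change is what aborts those commands so the mutex can be released — then re-check and complete the transition under the lock. A minimal userspace sketch of that shape (my reading of the intent, heavily simplified):

#include <pthread.h>
#include <stdatomic.h>

static atomic_int dev_fatal;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void enter_error_state(void)
{
	/* Phase 1: publish "fatal" without the lock, so a holder that
	 * polls this flag can abort its work and release the lock.
	 */
	atomic_store(&dev_fatal, 1);

	/* Phase 2: serialize the actual error handling. */
	pthread_mutex_lock(&state_lock);
	/* ... flush pending commands, schedule recovery ... */
	pthread_mutex_unlock(&state_lock);
}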
@@ -785,6 +785,11 @@ err_disable:

 static void mlx5_pci_close(struct mlx5_core_dev *dev)
 {
+	/* health work might still be active, and it needs pci bar in
+	 * order to know the NIC state. Therefore, drain the health WQ
+	 * before removing the pci bars
+	 */
+	mlx5_drain_health_wq(dev);
 	iounmap(dev->iseg);
 	pci_clear_master(dev->pdev);
 	release_bar(dev->pdev);
@@ -1194,23 +1199,22 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 	if (err)
 		goto err_load;

+	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+
 	if (boot) {
 		err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
 		if (err)
 			goto err_devlink_reg;
-	}
-
-	if (mlx5_device_registered(dev))
-		mlx5_attach_device(dev);
-	else
 		mlx5_register_device(dev);
-
-	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+	} else {
+		mlx5_attach_device(dev);
+	}

 	mutex_unlock(&dev->intf_state_mutex);
 	return 0;

 err_devlink_reg:
+	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 	mlx5_unload(dev);
 err_load:
 	if (boot)
@@ -1226,10 +1230,15 @@ out:

 void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
-	if (cleanup)
-		mlx5_unregister_device(dev);
-
 	mutex_lock(&dev->intf_state_mutex);
+
+	if (cleanup) {
+		mlx5_unregister_device(dev);
+		mlx5_devlink_unregister(priv_to_devlink(dev));
+	} else {
+		mlx5_detach_device(dev);
+	}
+
 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
 			       __func__);
@@ -1240,9 +1249,6 @@ void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)

 	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

-	if (mlx5_device_registered(dev))
-		mlx5_detach_device(dev);
-
 	mlx5_unload(dev);

 	if (cleanup)
@@ -1275,11 +1281,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)

 	priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
 					    mlx5_debugfs_root);
-	if (!priv->dbg_root) {
-		dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
-		goto err_dbg_root;
-	}
-
 	err = mlx5_health_init(dev);
 	if (err)
 		goto err_health_init;
@@ -1294,7 +1295,6 @@ err_pagealloc_init:
 	mlx5_health_cleanup(dev);
 err_health_init:
 	debugfs_remove(dev->priv.dbg_root);
-err_dbg_root:
 	mutex_destroy(&priv->pgdir_mutex);
 	mutex_destroy(&priv->alloc_mutex);
 	mutex_destroy(&priv->bfregs.wc_head.lock);
@@ -1362,6 +1362,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

 	pci_save_state(pdev);
+	devlink_reload_enable(devlink);
 	return 0;

 err_load_one:
@@ -1379,9 +1380,8 @@ static void remove_one(struct pci_dev *pdev)
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct devlink *devlink = priv_to_devlink(dev);

+	devlink_reload_disable(devlink);
 	mlx5_crdump_disable(dev);
-	mlx5_devlink_unregister(devlink);
-	mlx5_drain_health_wq(dev);
 	mlx5_unload_one(dev, true);
 	mlx5_pci_close(dev);
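Taken together with the mlx5_pci_close() hunk above, nothing is lost here: mlx5_drain_health_wq() moved from remove_one() into mlx5_pci_close(), which runs both on normal removal and on the probe error-unwind path — the latter is what the 'drain health workqueue in case of driver load error' patch fixes, since a health work item scheduled during a failed load could otherwise touch PCI BARs after they are released. Sketch of the resulting call shape (simplified, locals elided):

static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	mlx5_drain_health_wq(dev);	/* always drained before BARs go away */
	/* iounmap(), pci_clear_master(), release_bar() ... */
}

static void remove_one(struct pci_dev *pdev)
{
	/* devlink_reload_disable(), mlx5_crdump_disable(),
	 * mlx5_unload_one() ...
	 */
	mlx5_pci_close(dev);		/* drains the health WQ */
}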
@@ -179,7 +179,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
 	dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
-	kfree(in);
+	kvfree(in);
 	if (err)
 		goto err_in;
 	dr_qp->uar = attr->uar;
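Context for the one-word fix: the `in` buffer freed here is, judging from the kvfree() pairing (the allocation site is outside the hunk), allocated with kvzalloc(), which may hand back either slab or vmalloc memory depending on size and memory pressure. Only kvfree() copes with both; kfree() on a vmalloc address corrupts memory. The pairing rule in kernel C (sketch):

#include <linux/slab.h>	/* kzalloc(), kfree() */
#include <linux/mm.h>	/* kvzalloc(), kvfree() */

static void alloc_pairing_example(size_t sz)
{
	void *a = kzalloc(sz, GFP_KERNEL);	/* always slab memory */
	void *b = kvzalloc(sz, GFP_KERNEL);	/* slab OR vmalloc fallback */

	kfree(a);	/* fine: slab only */
	kvfree(b);	/* required: handles either origin */
}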