mlx5-fixes-2021-01-07
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl/3bZsACgkQSD+KveBX
+j4powgAnzKVXUxN/R5IJ8z9kONZEvI+9SgHQ/qvzsHY66zy6xHlCdrmRWYI8V1c
KuZltBQPVCd1fAAWAzzmkEOpClM1IbS6zd0/OoJFHJfZYdFEZStvw4NrazpRNd6E
rPUgJpxRFjiLbklZhpfZ6yaoAWez/fXPQbXB+ObOs2m/guesCyZy/T7mBTvvovwA
gmrWoxb6vApJZtgk3bIQKaaevx5/0UFCWqJIoAgKyCzVKf8QvQ0OLRkt27Cn3B08
lTjtUjiGe4boxVY8F+bEb79BcnezIb/NlafjoJ1gyCgZ5dvHSrfaopiAxIZhxRSX
bP/W8cCVZvUngclDs/L+cx3Y3btpNA==
=0qLp
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-01-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-01-07

* tag 'mlx5-fixes-2021-01-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix memleak in mlx5e_create_l2_table_groups
  net/mlx5e: Fix two double free cases
  net/mlx5: Release devlink object if adev fails
  net/mlx5e: ethtool, Fix restriction of autoneg with 56G
  net/mlx5e: In skb build skip setting mark in switchdev mode
  net/mlx5: E-Switch, fix changing vf VLANID
  net/mlx5e: Fix SWP offsets when vlan inserted by driver
  net/mlx5e: CT: Use per flow counter when CT flow accounting is enabled
  net/mlx5: Use port_num 1 instead of 0 when delete a RoCE address
  net/mlx5e: Add missing capability check for uplink follow
  net/mlx5: Check if lag is supported before creating one
====================

Link: https://lore.kernel.org/r/20210107202845.470205-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit 220efcf9ca
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	if (!reg_c0)
 		return true;
 
+	/* If reg_c0 is not equal to the default flow tag then skb->mark
+	 * is not supported and must be reset back to 0.
+	 */
+	skb->mark = 0;
+
 	priv = netdev_priv(skb->dev);
 	esw = priv->mdev->priv.eswitch;
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
 	u16 zone;
 };
 
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
 	struct mlx5_fc *counter;
 	refcount_t refcount;
+	bool is_shared;
 };
 
 struct mlx5_ct_entry {
 	struct rhash_head node;
 	struct rhash_head tuple_node;
 	struct rhash_head tuple_nat_node;
-	struct mlx5_ct_shared_counter *shared_counter;
+	struct mlx5_ct_counter *counter;
 	unsigned long cookie;
 	unsigned long restore_cookie;
 	struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
 }
 
 static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
 {
-	if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+	if (entry->counter->is_shared &&
+	    !refcount_dec_and_test(&entry->counter->refcount))
 		return;
 
-	mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
-	kfree(entry->shared_counter);
+	mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+	kfree(entry->counter);
 }
 
 static void
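
The rewritten put above carries the whole ownership rule of this fix: a per-flow counter (is_shared == false) bypasses the refcount and dies with its one entry, while a shared counter is destroyed only by the last of the two tuples holding it. A minimal user-space model of that rule, with a plain C11 atomic standing in for the kernel's refcount_t and free() for mlx5_fc_destroy()/kfree(); the names here are illustrative, not the driver's:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdlib.h>

  struct ct_counter {
          atomic_int refcount;   /* meaningful only when is_shared */
          bool is_shared;
  };

  static void ct_counter_put(struct ct_counter *c)
  {
          /* Shared: only the last holder may free. Per-flow: free now. */
          if (c->is_shared && atomic_fetch_sub(&c->refcount, 1) != 1)
                  return;
          free(c);
  }

  int main(void)
  {
          struct ct_counter *shared = calloc(1, sizeof(*shared));

          shared->is_shared = true;
          atomic_init(&shared->refcount, 2);  /* tuple + reverse tuple */
          ct_counter_put(shared);             /* 2 -> 1, still alive */
          ct_counter_put(shared);             /* last put frees */

          struct ct_counter *per_flow = calloc(1, sizeof(*per_flow));

          ct_counter_put(per_flow);           /* freed immediately */
          return 0;
  }
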
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	attr->dest_ft = ct_priv->post_ct;
 	attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
 	attr->outer_match_level = MLX5_MATCH_L4;
-	attr->counter = entry->shared_counter->counter;
+	attr->counter = entry->counter->counter;
 	attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
 
 	mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ err_attr:
 	return err;
 }
 
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+	struct mlx5_ct_counter *counter;
+	int ret;
+
+	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+	if (!counter)
+		return ERR_PTR(-ENOMEM);
+
+	counter->is_shared = false;
+	counter->counter = mlx5_fc_create(ct_priv->dev, true);
+	if (IS_ERR(counter->counter)) {
+		ct_dbg("Failed to create counter for ct entry");
+		ret = PTR_ERR(counter->counter);
+		kfree(counter);
+		return ERR_PTR(ret);
+	}
+
+	return counter;
+}
+
+static struct mlx5_ct_counter *
 mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
 			      struct mlx5_ct_entry *entry)
 {
 	struct mlx5_ct_tuple rev_tuple = entry->tuple;
-	struct mlx5_ct_shared_counter *shared_counter;
 	struct mlx5_core_dev *dev = ct_priv->dev;
+	struct mlx5_ct_counter *shared_counter;
 	struct mlx5_ct_entry *rev_entry;
 	__be16 tmp_port;
 	int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
 	rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
 					   tuples_ht_params);
 	if (rev_entry) {
-		if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+		if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
 			mutex_unlock(&ct_priv->shared_counter_lock);
-			return rev_entry->shared_counter;
+			return rev_entry->counter;
 		}
 	}
 	mutex_unlock(&ct_priv->shared_counter_lock);
 
-	shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
-	if (!shared_counter)
-		return ERR_PTR(-ENOMEM);
-
-	shared_counter->counter = mlx5_fc_create(dev, true);
-	if (IS_ERR(shared_counter->counter)) {
-		ct_dbg("Failed to create counter for ct entry");
-		ret = PTR_ERR(shared_counter->counter);
-		kfree(shared_counter);
+	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+	if (IS_ERR(shared_counter)) {
+		ret = PTR_ERR(shared_counter);
 		return ERR_PTR(ret);
 	}
 
+	shared_counter->is_shared = true;
 	refcount_set(&shared_counter->refcount, 1);
 	return shared_counter;
 }
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 {
 	int err;
 
-	entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
-	if (IS_ERR(entry->shared_counter)) {
-		err = PTR_ERR(entry->shared_counter);
-		ct_dbg("Failed to create counter for ct entry");
+	if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+		entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+	else
+		entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+	if (IS_ERR(entry->counter)) {
+		err = PTR_ERR(entry->counter);
 		return err;
 	}
 
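
For reference: nf_ct_acct_enabled() reads the per-netns conntrack accounting switch (the net.netfilter.nf_conntrack_acct sysctl). With accounting on, every offloaded flow now gets a private hardware counter so its byte and packet stats are exact; with it off, the driver keeps the cheaper pre-existing scheme in which a tuple and its reverse tuple share one counter.
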
@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 err_nat:
 	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
 err_orig:
-	mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+	mlx5_tc_ct_counter_put(ct_priv, entry);
 	return err;
 }
 
@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
 		rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
 				       tuples_ht_params);
 	mutex_unlock(&ct_priv->shared_counter_lock);
-	mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+	mlx5_tc_ct_counter_put(ct_priv, entry);
 
 }
 
@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
 	if (!entry)
 		return -ENOENT;
 
-	mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+	mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
 
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
 	u8 tun_l4_proto;
 };
 
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+	/* SWP offsets are in 2-bytes words */
+	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
 static inline void
 mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 		   struct mlx5e_swp_spec *swp_spec)
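
The division by two above is the heart of the SWP fix: software-parser offsets are counted in 2-byte words, while VLAN_HLEN is 4 bytes, so a driver-inserted tag shifts every offset by exactly two words. A stand-alone sanity check of that arithmetic (plain C; the header lengths are the standard Ethernet/802.1Q values, not taken from driver headers):

  #include <assert.h>
  #include <stdio.h>

  #define ETH_HLEN  14  /* untagged Ethernet header, bytes */
  #define VLAN_HLEN  4  /* 802.1Q tag, bytes */

  int main(void)
  {
          /* Outer L3 begins right after the Ethernet header; SWP
           * expresses that position in 2-byte words.
           */
          unsigned int l3_words = ETH_HLEN / 2;                  /* 7 */
          unsigned int l3_words_vlan = l3_words + VLAN_HLEN / 2; /* 9 */

          assert(l3_words_vlan * 2 == ETH_HLEN + VLAN_HLEN);
          printf("outer L3 offset: %u words, %u with a VLAN tag\n",
                 l3_words, l3_words_vlan);
          return 0;
  }

The ihs checks threaded through the following hunks exist for the same reason: the offsets only need to move when the driver itself placed the tag into the inlined headers (ihs non-zero); otherwise the tag is inserted by the hardware later in the pipeline and the parsed layout is unchanged.
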
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
 }
 
 static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 	struct mlx5e_swp_spec swp_spec = {};
 	unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
 	}
 
 	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+	if (skb_vlan_tag_present(skb) && ihs)
+		mlx5e_eseg_swp_offsets_add_vlan(eseg);
 }
 
 #else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
 
 static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 				       struct sk_buff *skb,
-				       struct mlx5_wqe_eth_seg *eseg)
+				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_GENEVE)
 	if (skb->encapsulation)
-		mlx5e_tx_tunnel_accel(skb, eseg);
+		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
 #endif
 
 	return true;
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
 	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
 }
 
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+				const unsigned long link_modes, u8 autoneg)
+{
+	/* Extended link-mode has no speed limitations. */
+	if (ext)
+		return 0;
+
+	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+	    autoneg != AUTONEG_ENABLE) {
+		netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+			   __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
 {
 	u32 i, ptys_modes = 0;
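
A compact user-space model of what the new helper enforces. MLX5E_PROT_MASK appears to be a plain 1 << link_mode bitmask in the driver headers, and the bit position used below is illustrative, not the real enum value:

  #include <stdbool.h>
  #include <stdio.h>

  #define PROT_MASK(mode)  (1UL << (mode))
  #define MODE_56GBASE_R4  8   /* assumed bit position for the example */
  #define AUTONEG_ENABLE   1

  /* Mirrors mlx5e_speed_validate(): extended modes are exempt;
   * legacy 56GBASE-R4 may only be reached through autonegotiation.
   */
  static bool speed_valid(bool ext, unsigned long modes, int autoneg)
  {
          if (ext)
                  return true;
          return !((modes & PROT_MASK(MODE_56GBASE_R4)) &&
                   autoneg != AUTONEG_ENABLE);
  }

  int main(void)
  {
          printf("56G, autoneg off: %d\n",
                 speed_valid(false, PROT_MASK(MODE_56GBASE_R4), 0));
          printf("56G, autoneg on:  %d\n",
                 speed_valid(false, PROT_MASK(MODE_56GBASE_R4),
                             AUTONEG_ENABLE));
          return 0;
  }

The behavioural change sits in the ext test: previously the 56G check was applied to extended link modes as well, where the same bit position belongs to a different mode, so valid forced-speed requests on newer devices were rejected unless autoneg was on.
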
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);
 
-	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
-	    autoneg != AUTONEG_ENABLE) {
-		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
-			   __func__);
-		err = -EINVAL;
+	err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+	if (err)
 		goto out;
-	}
 
 	link_modes = link_modes & eproto.cap;
 	if (!link_modes) {
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		kfree(ft->g);
+		ft->g = NULL;
 		return -ENOMEM;
 	}
 
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		kfree(ft->g);
+		ft->g = NULL;
 		return -ENOMEM;
 	}
 
@@ -1390,6 +1392,7 @@ err_destroy_groups:
 	ft->g[ft->num_groups] = NULL;
 	mlx5e_destroy_groups(ft);
 	kvfree(in);
+	kfree(ft->g);
 
 	return err;
 }
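
The three hunks above are variations on one theme: ft->g was freed on an error path but left pointing at the dead allocation, so a later cleanup freed it a second time, while the err_destroy_groups unwind never freed it at all (the memleak called out in the series). Nulling the pointer right after the free makes any subsequent free a no-op, since kfree(NULL), like free(NULL), is defined to do nothing. The idiom in miniature (user-space C, generic names):

  #include <stdlib.h>

  struct table {
          void **g;  /* group array owned by the table */
  };

  static void table_destroy(struct table *t)
  {
          free(t->g);     /* free(NULL) is a harmless no-op */
          t->g = NULL;
  }

  static int table_init(struct table *t)
  {
          t->g = calloc(8, sizeof(*t->g));
          if (!t->g)
                  return -1;
          /* ...a later step fails... */
          free(t->g);
          t->g = NULL;    /* omit this and table_destroy() double frees */
          return -1;
  }

  int main(void)
  {
          struct table t = { 0 };

          (void)table_init(&t);
          table_destroy(&t);  /* safe because the pointer was cleared */
          return 0;
  }
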
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
 
 	mlx5_set_port_admin_status(mdev, state);
 
-	if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+	if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+	    !MLX5_CAP_GEN(mdev, uplink_follow))
 		return;
 
 	if (state == MLX5_PORT_UP)
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 
 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
 				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
-				   struct mlx5_wqe_eth_seg *eseg)
+				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
-	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
 		return false;
 
 	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
 		struct mlx5_wqe_eth_seg eseg = {};
 
-		if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+		if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+						     attr.ihs)))
 			return NETDEV_TX_OK;
 
 		mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* May update the WQE, but may not post other WQEs. */
 	mlx5e_accel_tx_finish(sq, wqe, &accel,
 			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
-	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
 		return NETDEV_TX_OK;
 
 	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 		return 0;
 	}
 
-	if (!IS_ERR_OR_NULL(vport->egress.acl))
-		return 0;
+	if (!vport->egress.acl) {
+		vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+							 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+							 table_size);
+		if (IS_ERR(vport->egress.acl)) {
+			err = PTR_ERR(vport->egress.acl);
+			vport->egress.acl = NULL;
+			goto out;
+		}
 
-	vport->egress.acl = esw_acl_table_create(esw, vport->vport,
-						 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
-						 table_size);
-	if (IS_ERR(vport->egress.acl)) {
-		err = PTR_ERR(vport->egress.acl);
-		vport->egress.acl = NULL;
-		goto out;
+		err = esw_acl_egress_lgcy_groups_create(esw, vport);
+		if (err)
+			goto out;
 	}
 
-	err = esw_acl_egress_lgcy_groups_create(esw, vport);
-	if (err)
-		goto out;
-
 	esw_debug(esw->dev,
 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
 		  vport->vport, vport->info.vlan, vport->info.qos);
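
Read as a whole, the restructuring changes esw_acl_egress_lgcy_setup() from "bail out if the egress ACL already exists" to "create the ACL table and its groups only on first use, then fall through and reprogram the rules". The old IS_ERR_OR_NULL early return is what broke changing a VF VLAN ID a second time: the function returned before the new VLAN rule was ever installed.
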
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 	struct mlx5_core_dev *tmp_dev;
 	int i, err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
 		return;
 
 	tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 	if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
 		return;
 
-	for (i = 0; i < MLX5_MAX_PORTS; i++) {
-		tmp_dev = ldev->pf[i].dev;
-		if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
-		    MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (!ldev->pf[i].dev)
 			break;
-	}
 
 	if (i >= MLX5_MAX_PORTS)
 		ldev->flags |= MLX5_LAG_FLAG_READY;
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;
 
 	dev->priv.adev_idx = mlx5_adev_idx_alloc();
-	if (dev->priv.adev_idx < 0)
-		return dev->priv.adev_idx;
+	if (dev->priv.adev_idx < 0) {
+		err = dev->priv.adev_idx;
+		goto adev_init_err;
+	}
 
 	err = mlx5_mdev_init(dev, prof_sel);
 	if (err)
@@ -1403,6 +1405,7 @@ pci_init_err:
 	mlx5_mdev_uninit(dev);
 mdev_init_err:
 	mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
 	mlx5_devlink_free(devlink);
 
 	return err;
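
Both hunks belong to one fix: mlx5_devlink_alloc() has already succeeded by the time the adev index is allocated, so the old early return dropped the devlink object on the floor; routing the failure through a new adev_init_err label folds it into the usual reverse-order unwind. The pattern, stripped to a compilable skeleton (generic names, user-space allocators standing in for the driver calls):

  #include <stdio.h>
  #include <stdlib.h>

  static int alloc_idx(void) { return -1; } /* always fails, to exercise the path */
  static void free_idx(int idx) { (void)idx; }

  /* Goto-unwind idiom: each acquired resource gains a label in the
   * error path, released in reverse order of acquisition.
   */
  static int init_one(void)
  {
          void *devlink, *mdev;
          int idx, err;

          devlink = malloc(64);            /* mlx5_devlink_alloc() analogue */
          if (!devlink)
                  return -1;

          idx = alloc_idx();               /* mlx5_adev_idx_alloc() analogue */
          if (idx < 0) {
                  err = idx;
                  goto adev_init_err;      /* was "return idx": leaked devlink */
          }

          mdev = malloc(64);               /* mlx5_mdev_init() analogue */
          if (!mdev) {
                  err = -1;
                  goto mdev_init_err;
          }

          return 0;                        /* success: resources stay owned */

  mdev_init_err:
          free_idx(idx);                   /* mlx5_adev_idx_free() analogue */
  adev_init_err:
          free(devlink);                   /* mlx5_devlink_free() analogue */
          return err;
  }

  int main(void)
  {
          printf("init_one() = %d\n", init_one());
          return 0;
  }
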
@@ -116,7 +116,7 @@ free:
 static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
 {
 	mlx5_core_roce_gid_set(dev, 0, 0, 0,
-			       NULL, NULL, false, 0, 0);
+			       NULL, NULL, false, 0, 1);
 }
 
 static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
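
The single-digit change is less trivial than it looks: the trailing argument of mlx5_core_roce_gid_set() is the physical port number, and these are 1-based in the mlx5 firmware interface, so a delete issued for port 0 never matched the RoCE address that had been installed on port 1.
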
@@ -1280,7 +1280,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         ece_support[0x1];
 	u8         reserved_at_a4[0x7];
 	u8         log_max_srq[0x5];
-	u8         reserved_at_b0[0x2];
+	u8         reserved_at_b0[0x1];
+	u8         uplink_follow[0x1];
 	u8         ts_cqe_to_dest_cqn[0x1];
 	u8         reserved_at_b3[0xd];
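
In mlx5_ifc.h layouts, a new capability bit must come out of a reserved run without disturbing anything behind it: reserved_at_b0[0x2] splits into reserved_at_b0[0x1] plus uplink_follow[0x1], so ts_cqe_to_dest_cqn stays at bit 0xb2 and reserved_at_b3 is untouched. The bookkeeping as a compile-time check (plain C11; widths copied from the hunk):

  #include <assert.h>

  enum {
          OLD_RESERVED_AT_B0 = 0x2,  /* before the patch */
          NEW_RESERVED_AT_B0 = 0x1,  /* after the patch  */
          NEW_UPLINK_FOLLOW  = 0x1,
  };

  static_assert(NEW_RESERVED_AT_B0 + NEW_UPLINK_FOLLOW == OLD_RESERVED_AT_B0,
                "uplink_follow must not shift the fields that follow it");

  int main(void) { return 0; }
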