mlx5-fixes-2018-11-19

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJb80jFAAoJEEg/ir3gV/o+eUEH/0/bUhiaxaN88VFZXA24WlU2
v5jaeMg1P3ZuWYNS/AxGWedYWYr7oOgin79/4LIBKJMFdTdLGS2Oa0cTi0ycSNpx
0WMhVchClX3/UNTn6u/G23VsHeYav/AP+uaqnI+Tbtv4o8tc9ecpFgCXo4pMJaW/
osUOJC3MmU68yRAo91uwhhSHWMsl+8tceMdoS9N1Q6faM2v9XcJrMvKHMNpDvPpf
VXvC18fTX/qi/loJ6oKepTXn6lNVKDIgKmCjjudgwfRHPiw9R5uMlunaOoHO1wnH
T8ttdg/Dn94BLOOmHlOoW9TT7LVFLk7w2y5NoefUY0wUKSjByMxzyzqmdI+qXZU=
=SSaw
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2018-11-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-11-19

The following fixes are for mlx5 core and netdev driver.

For -stable v4.16
 bc7fda7d4637 ('net/mlx5e: IPoIB, Reset QP after channels are closed')

For -stable v4.17
 36917a270395 ('net/mlx5: IPSec, Fix the SA context hash key')

For -stable v4.18
 6492a432be3a ('net/mlx5e: Always use the match level enum when parsing TC rule match')
 c3f81be236b1 ('net/mlx5e: Removed unnecessary warnings in FEC caps query')
 c5ce2e736b64 ('net/mlx5e: Fix selftest for small MTUs')

For -stable v4.19
 effcd896b25e ('net/mlx5e: Adjust to max number of channles when re-attaching')
 394cbc5acd68 ('net/mlx5e: RX, verify received packet size in Linear Striding RQ')
 447cbb3613c8 ('net/mlx5e: Don't match on vlan non-existence if ethertype is wildcarded')
 c223c1574612 ('net/mlx5e: Claim TC hw offloads support only under a proper build config')

Please pull and let me know if there's any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1359f25106
drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -569,6 +569,7 @@ struct mlx5e_rq {
 
     unsigned long          state;
     int                    ix;
+    unsigned int           hw_mtu;
 
     struct net_dim         dim; /* Dynamic Interrupt Moderation */
 
drivers/net/ethernet/mellanox/mlx5/core/en/port.c

@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 
     eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
     *speed = mlx5e_port_ptys2speed(eth_proto_oper);
-    if (!(*speed)) {
-        mlx5_core_warn(mdev, "cannot get port speed\n");
+    if (!(*speed))
         err = -EINVAL;
-    }
 
     return err;
 }
@@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
     case 40000:
         if (!write)
             *fec_policy = MLX5_GET(pplm_reg, pplm,
-                           fec_override_cap_10g_40g);
+                           fec_override_admin_10g_40g);
         else
             MLX5_SET(pplm_reg, pplm,
                  fec_override_admin_10g_40g, *fec_policy);
@@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
     case 10000:
     case 40000:
         *fec_cap = MLX5_GET(pplm_reg, pplm,
-                    fec_override_admin_10g_40g);
+                    fec_override_cap_10g_40g);
         break;
     case 25000:
         *fec_cap = MLX5_GET(pplm_reg, pplm,
@@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
 
 int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 {
+    u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
     bool fec_mode_not_supp_in_speed = false;
-    u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
     u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
     u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
     int sz = MLX5_ST_SZ_BYTES(pplm_reg);
-    u32 current_fec_speed;
+    u8 fec_policy_auto = 0;
     u8 fec_caps = 0;
     int err;
     int i;
@@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
     if (err)
         return err;
 
-    err = mlx5e_port_linkspeed(dev, &current_fec_speed);
-    if (err)
-        return err;
+    MLX5_SET(pplm_reg, out, local_port, 1);
 
-    memset(in, 0, sz);
-    MLX5_SET(pplm_reg, in, local_port, 1);
-    for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
+    for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
         mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
-        /* policy supported for link speed */
-        if (!!(fec_caps & fec_policy)) {
-            mlx5e_fec_admin_field(in, &fec_policy, 1,
+        /* policy supported for link speed, or policy is auto */
+        if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
+            mlx5e_fec_admin_field(out, &fec_policy, 1,
                           fec_supported_speeds[i]);
         } else {
-            if (fec_supported_speeds[i] == current_fec_speed)
-                return -EOPNOTSUPP;
-            mlx5e_fec_admin_field(in, &no_fec_policy, 1,
-                          fec_supported_speeds[i]);
+            /* turn off FEC if supported. Else, leave it the same */
+            if (fec_caps & fec_policy_nofec)
+                mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
+                              fec_supported_speeds[i]);
             fec_mode_not_supp_in_speed = true;
         }
     }
@@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
                   "FEC policy 0x%x is not supported for some speeds",
                   fec_policy);
 
-    return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
+    return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
 }
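Side note on the PPLM flow the FEC fixes above converge on: the register is queried once (write=0), the per-speed admin fields are updated in the returned buffer, and the same buffer is written back (write=1); fec_override_cap_* fields are read-only capabilities, while fec_override_admin_* fields carry the requested policy. A minimal single-speed sketch of that read-modify-write, with error handling trimmed (not the actual driver function):

/* Sketch only: mirrors the corrected en/port.c flow for the 10G/40G field;
 * the real mlx5e_set_fec_mode() loops over all supported speeds.
 */
static int set_fec_10g_40g(struct mlx5_core_dev *dev, u8 fec_policy)
{
    u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
    u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
    int sz = MLX5_ST_SZ_BYTES(pplm_reg);
    u8 fec_caps;
    int err;

    /* query the current PPLM state */
    MLX5_SET(pplm_reg, in, local_port, 1);
    err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
    if (err)
        return err;

    /* capability field: what the port can do at this speed */
    fec_caps = MLX5_GET(pplm_reg, out, fec_override_cap_10g_40g);

    /* admin field: what we request; fall back to no FEC instead of failing */
    if (fec_caps & fec_policy)
        MLX5_SET(pplm_reg, out, fec_override_admin_10g_40g, fec_policy);
    else if (fec_caps & BIT(MLX5E_FEC_NOFEC))
        MLX5_SET(pplm_reg, out, fec_override_admin_10g_40g,
             BIT(MLX5E_FEC_NOFEC));

    /* write the modified buffer back */
    MLX5_SET(pplm_reg, out, local_port, 1);
    return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}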
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c

@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
     int err;
 
     err = mlx5e_port_linkspeed(priv->mdev, &speed);
-    if (err)
+    if (err) {
+        mlx5_core_warn(priv->mdev, "cannot get port speed\n");
         return 0;
+    }
 
     xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
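For a feel of the xoff line in the hunk above, here is a rough worked example in plain userspace C; it assumes mlx5e_port_linkspeed() reports speed in Mb/s and dcbx.cable_len is in meters, and the input values are made up:

#include <stdio.h>

int main(void)
{
    unsigned int speed = 100000;   /* 100GbE link, assumed to be in Mb/s */
    unsigned int cable_len = 7;    /* meters (made-up value) */
    unsigned int mtu = 1500;

    /* Same integer arithmetic as the kernel line:
     * (301 + 216 * 7 / 100) * 100000 / 1000 + 272 * 1500 / 100
     * = 316 * 100 + 4080 = 35680
     */
    unsigned int xoff = (301 + 216 * cable_len / 100) * speed / 1000 +
                272 * mtu / 100;

    printf("xoff threshold: %u bytes\n", xoff);
    return 0;
}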
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c

@@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
     ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
                          Autoneg);
 
-    err = get_fec_supported_advertised(mdev, link_ksettings);
-    if (err)
+    if (get_fec_supported_advertised(mdev, link_ksettings))
         netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
                __func__, err);
 
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
     rq->channel = c;
     rq->ix      = c->ix;
     rq->mdev    = mdev;
+    rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
     rq->stats   = &c->priv->channel_stats[c->ix].rq;
 
     rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
     int err;
     u32 i;
 
+    err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+    if (err)
+        return err;
+
     err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                    &cq->wq_ctrl);
     if (err)
         return err;
 
-    mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
     mcq->cqe_sz     = 64;
     mcq->set_ci_db  = cq->wq_ctrl.db.db;
     mcq->arm_db     = cq->wq_ctrl.db.db + 1;
@@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
     int eqn;
     int err;
 
+    err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+    if (err)
+        return err;
+
     inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
         sizeof(u64) * cq->wq_ctrl.buf.npages;
     in = kvzalloc(inlen, GFP_KERNEL);
@@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
     mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                   (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
-    mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
-
     MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
     MLX5_SET(cqc,   cqc, c_eqn,         eqn);
     MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
@@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
     int err;
     int eqn;
 
+    err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+    if (err)
+        return err;
+
     c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
     if (!c)
         return -ENOMEM;
@@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
     c->xdp      = !!params->xdp_prog;
     c->stats    = &priv->channel_stats[ix].ch;
 
-    mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
     c->irq_desc = irq_to_desc(irq);
 
     netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
     return 0;
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 {
     struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 
     return 0;
 }
+#endif
 
 static int set_feature_rx_all(struct net_device *netdev, bool enable)
 {
@@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
     err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
     err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
                     set_feature_cvlan_filter);
+#ifdef CONFIG_MLX5_ESWITCH
     err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+#endif
     err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
     err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
     err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
     }
 
     if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+        bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
         u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
         u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
 
-        reset = reset && (ppw_old != ppw_new);
+        reset = reset && (is_linear || (ppw_old != ppw_new));
     }
 
     if (!reset) {
@@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
         FT_CAP(modify_root) &&
         FT_CAP(identified_miss_table_mode) &&
         FT_CAP(flow_table_modify)) {
+#ifdef CONFIG_MLX5_ESWITCH
         netdev->hw_features      |= NETIF_F_HW_TC;
+#endif
 #ifdef CONFIG_MLX5_EN_ARFS
         netdev->hw_features      |= NETIF_F_NTUPLE;
 #endif
@@ -5004,11 +5019,21 @@ err_free_netdev:
 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 {
     const struct mlx5e_profile *profile;
+    int max_nch;
     int err;
 
     profile = priv->profile;
     clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
+    /* max number of channels may have changed */
+    max_nch = mlx5e_get_max_num_channels(priv->mdev);
+    if (priv->channels.params.num_channels > max_nch) {
+        mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+        priv->channels.params.num_channels = max_nch;
+        mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+                          MLX5E_INDIR_RQT_SIZE, max_nch);
+    }
+
     err = profile->init_tx(priv);
     if (err)
         goto out;
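On the mlx5e_attach_netdev() change above: after clamping num_channels the default indirection table is rebuilt so no RQT entry refers to a channel that no longer exists. mlx5e_build_default_indir_rqt() itself is not part of this diff; a rough sketch of what it does is simply a round-robin spread:

/* Rough sketch of the helper called above (not taken from this diff):
 * spread the RQT slots evenly across the remaining channels.
 */
static void build_default_indir_rqt(u32 *indirection_rqt, int len,
                    int num_channels)
{
    int i;

    for (i = 0; i < len; i++)
        indirection_rqt[i] = i % num_channels;
}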
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
     u32 frag_size;
     bool consumed;
 
+    /* Check packet size. Note LRO doesn't use linear SKB */
+    if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+        rq->stats->oversize_pkts_sw_drop++;
+        return NULL;
+    }
+
     va             = page_address(di->page) + head_offset;
     data           = va + rx_headroom;
     frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c

@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
     return 1;
 }
 
-#ifdef CONFIG_INET
-/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
-
 struct mlx5ehdr {
     __be32 version;
     __be64 magic;
-    char   text[ETH_GSTRING_LEN];
 };
 
+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
+                 sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
 static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 {
     struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
     struct ethhdr *ethh;
     struct udphdr *udph;
     struct iphdr *iph;
-    int    datalen, iplen;
-
-    datalen = MLX5E_TEST_PKT_SIZE -
-          (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+    int    iplen;
 
     skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
     if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
     /* Fill UDP header */
     udph->source = htons(9);
     udph->dest = htons(9); /* Discard Protocol */
-    udph->len = htons(datalen + sizeof(struct udphdr));
+    udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
     udph->check = 0;
 
     /* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
     iph->ttl = 32;
     iph->version = 4;
     iph->protocol = IPPROTO_UDP;
-    iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+    iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
+        sizeof(struct mlx5ehdr);
     iph->tot_len = htons(iplen);
     iph->frag_off = 0;
     iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
     mlxh = skb_put(skb, sizeof(*mlxh));
     mlxh->version = 0;
     mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
-    strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
-    datalen -= sizeof(*mlxh);
-    skb_put_zero(skb, datalen);
 
     skb->csum = 0;
     skb->ip_summed = CHECKSUM_PARTIAL;
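The selftest fix above makes the loopback packet exactly as big as its headers, so it fits small MTUs. A throwaway userspace check of the resulting size (Linux uapi headers assumed; struct mlx5ehdr is re-declared here to mirror the reworked selftest header):

#include <stdio.h>
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>

/* Mirrors the trimmed selftest header: version + magic, no text field. */
struct mlx5ehdr {
    __be32 version;
    __be64 magic;
};

int main(void)
{
    size_t pkt = sizeof(struct ethhdr) + sizeof(struct iphdr) +
             sizeof(struct udphdr) + sizeof(struct mlx5ehdr);

    /* 14 + 20 + 8 + sizeof(struct mlx5ehdr); the last term is 12 bytes of
     * fields, possibly padded to 16 where __be64 is 8-byte aligned.
     */
    printf("loopback test packet size: %zu bytes\n", pkt);
    return 0;
}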
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c

@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
+    { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
         s->rx_wqe_err   += rq_stats->wqe_err;
         s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
         s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+        s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
         s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
         s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
         s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
     { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
     { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
     { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+    { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
     { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
     { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
     { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h

@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
     u64 rx_wqe_err;
     u64 rx_mpwqe_filler_cqes;
     u64 rx_mpwqe_filler_strides;
+    u64 rx_oversize_pkts_sw_drop;
     u64 rx_buff_alloc_err;
     u64 rx_cqe_compress_blks;
     u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
     u64 wqe_err;
     u64 mpwqe_filler_cqes;
     u64 mpwqe_filler_strides;
+    u64 oversize_pkts_sw_drop;
     u64 buff_alloc_err;
     u64 cqe_compress_blks;
     u64 cqe_compress_pkts;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                      inner_headers);
     }
 
-    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-        struct flow_dissector_key_eth_addrs *key =
+    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+        struct flow_dissector_key_basic *key =
             skb_flow_dissector_target(f->dissector,
-                          FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                          FLOW_DISSECTOR_KEY_BASIC,
                           f->key);
-        struct flow_dissector_key_eth_addrs *mask =
+        struct flow_dissector_key_basic *mask =
             skb_flow_dissector_target(f->dissector,
-                          FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                          FLOW_DISSECTOR_KEY_BASIC,
                           f->mask);
+        MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+             ntohs(mask->n_proto));
+        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+             ntohs(key->n_proto));
 
-        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                         dmac_47_16),
-                mask->dst);
-        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                         dmac_47_16),
-                key->dst);
-
-        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                         smac_47_16),
-                mask->src);
-        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                         smac_47_16),
-                key->src);
-
-        if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+        if (mask->n_proto)
             *match_level = MLX5_MATCH_L2;
     }
 
@@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
             *match_level = MLX5_MATCH_L2;
         }
-    } else {
+    } else if (*match_level != MLX5_MATCH_NONE) {
         MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
         MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+        *match_level = MLX5_MATCH_L2;
     }
 
     if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
         }
     }
 
-    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-        struct flow_dissector_key_basic *key =
+    if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+        struct flow_dissector_key_eth_addrs *key =
             skb_flow_dissector_target(f->dissector,
-                          FLOW_DISSECTOR_KEY_BASIC,
+                          FLOW_DISSECTOR_KEY_ETH_ADDRS,
                           f->key);
-        struct flow_dissector_key_basic *mask =
+        struct flow_dissector_key_eth_addrs *mask =
             skb_flow_dissector_target(f->dissector,
-                          FLOW_DISSECTOR_KEY_BASIC,
+                          FLOW_DISSECTOR_KEY_ETH_ADDRS,
                           f->mask);
-        MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
-             ntohs(mask->n_proto));
-        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-             ntohs(key->n_proto));
 
-        if (mask->n_proto)
+        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                         dmac_47_16),
+                mask->dst);
+        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                         dmac_47_16),
+                key->dst);
+
+        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                         smac_47_16),
+                mask->src);
+        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                         smac_47_16),
+                key->src);
+
+        if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
             *match_level = MLX5_MATCH_L2;
     }
 
@@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
             /* the HW doesn't need L3 inline to match on frag=no */
             if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
-                *match_level = MLX5_INLINE_MODE_L2;
+                *match_level = MLX5_MATCH_L2;
     /* ***  L2 attributes parsing up to here *** */
             else
-                *match_level = MLX5_INLINE_MODE_IP;
+                *match_level = MLX5_MATCH_L3;
         }
     }
 
@@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
     if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
         return -EOPNOTSUPP;
 
-    if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+    if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
         NL_SET_ERR_MSG_MOD(extack,
                    "current firmware doesn't support split rule for port mirroring");
         netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c

@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
 };
 
 static const struct rhashtable_params rhash_sa = {
-    .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
-    .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+    /* Keep out "cmd" field from the key as it's
+     * value is not constant during the lifetime
+     * of the key object.
+     */
+    .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
+           FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+    .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
+              FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
     .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
     .automatic_shrinking = true,
     .min_size = 1,
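The IPSec fix above hashes only the stable part of hw_sa by shifting key_offset past the mutable cmd word and shrinking key_len by the same amount. A toy illustration of the same trick with hypothetical structs (the struct and field names below are made up):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>

struct toy_hw_sa {
    __be32 cmd;              /* mutable: differs between add/del commands */
    u8     key_material[64]; /* stable part that actually identifies the SA */
};

struct toy_sa_ctx {
    struct rhash_head hash;
    struct toy_hw_sa  hw_sa;
};

static const struct rhashtable_params toy_rhash_params = {
    /* Hash only key_material: start the key after cmd and shorten it,
     * so a changing command word cannot break lookups.
     */
    .key_len     = FIELD_SIZEOF(struct toy_hw_sa, key_material),
    .key_offset  = offsetof(struct toy_sa_ctx, hw_sa) +
               offsetof(struct toy_hw_sa, key_material),
    .head_offset = offsetof(struct toy_sa_ctx, hash),
    .automatic_shrinking = true,
    .min_size = 1,
};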
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c

@@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
 
     netif_carrier_off(epriv->netdev);
     mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
-    mlx5i_uninit_underlay_qp(epriv);
     mlx5e_deactivate_priv_channels(epriv);
     mlx5e_close_channels(&epriv->channels);
+    mlx5i_uninit_underlay_qp(epriv);
 unlock:
     mutex_unlock(&epriv->state_lock);
     return 0;