mlx5-fixes-2022-05-17
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmKEjE0ACgkQSD+KveBX
+j4alQgAuKCGEmc7blX5Xwg2qDaRc7/WlLLXyrg5IBI3Pmqi/GrKqGfT7iNnX6xD
ADR3k64mSoA941Il18xyPOztdRzp9N0JQ2r1sTcKLx+4DrMSpnQERnDb8pHjLsqw
7l2BvkH/PjEAdwByi4rpt3X3BBahkPOW33QhEj3yIOvBY6EQnAtGDqVY4ql4vG7+
4dgcycNN7MA7ylxgVlwn66EumHGZTKnuugYwmK4eFDxgyaQYBbHfe/yOj4N8rdUF
pV0Z0sAdIvg+UXP0HKFVv08dINXQLFU5aCYVK/09SHrInP+ctW3uD+3ykFSxhQGZ
JOhyJJ6dZk0vmXGikeve36WMOCcRVQ==
=rT0G
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2022-05-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-05-17

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 765d121600
@@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
 };
 
 struct mlx5_ct_fs_smfs_matchers {
-	struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+	struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
 	struct list_head used;
 };
 
@@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
 };
 
 static inline void
-mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
+			  bool gre)
 {
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
 
@@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
 			 ntohs(MLX5_CT_TCP_FLAGS_MASK));
-	} else {
+	} else if (!gre) {
 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
 	}
@@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
 
 static struct mlx5dr_matcher *
 mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
-			       bool tcp, u32 priority)
+			       bool tcp, bool gre, u32 priority)
 {
 	struct mlx5dr_matcher *dr_matcher;
 	struct mlx5_flow_spec *spec;
@@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
 
 	dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
@@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
 }
 
 static struct mlx5_ct_fs_smfs_matcher *
-mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
 {
 	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
 	struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
@@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
 	int prio;
 
 	matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
-	smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+	smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];
 
 	if (refcount_inc_not_zero(&smfs_matcher->ref))
 		return smfs_matcher;
@@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
 	}
 
 	tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
-	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
 	if (IS_ERR(dr_matcher)) {
 		netdev_warn(fs->netdev,
-			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
-			    nat, ipv4, tcp, PTR_ERR(dr_matcher));
+			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+			    nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
 		smfs_matcher = ERR_CAST(dr_matcher);
 		goto out_unlock;
@@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
 static inline bool
 mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
 {
-#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
-	const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
-			       DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
-	const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
-	const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
-	const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
-	const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+	const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
+	const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+	const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+	const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
+	const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
+	const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+	const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
 
 	return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
-		used_keys == ipv6_udp);
+		used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
 }
 
 static bool
@@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
 	flow_rule_match_control(flow_rule, &control);
 	flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
 	flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
-	flow_rule_match_ports(flow_rule, &ports);
-	flow_rule_match_tcp(flow_rule, &tcp);
+	if (basic.key->ip_proto != IPPROTO_GRE)
+		flow_rule_match_ports(flow_rule, &ports);
+	if (basic.key->ip_proto == IPPROTO_TCP)
+		flow_rule_match_tcp(flow_rule, &tcp);
 
 	if (basic.mask->n_proto != htons(0xFFFF) ||
 	    (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
 	    basic.mask->ip_proto != 0xFF ||
-	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+	     basic.key->ip_proto != IPPROTO_GRE)) {
 		ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
 		       ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
 		       basic.key->ip_proto, basic.mask->ip_proto);
 		return false;
 	}
 
-	if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+	if (basic.key->ip_proto != IPPROTO_GRE &&
+	    (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
 		ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
 		       ports.mask->src, ports.mask->dst);
 		return false;
@@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
 	struct mlx5dr_action *actions[5];
 	struct mlx5dr_rule *rule;
 	int num_actions = 0, err;
-	bool nat, tcp, ipv4;
+	bool nat, tcp, ipv4, gre;
 
 	if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
 	ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
 	tcp = MLX5_GET(fte_match_param, spec->match_value,
 		       outer_headers.ip_protocol) == IPPROTO_TCP;
+	gre = MLX5_GET(fte_match_param, spec->match_value,
+		       outer_headers.ip_protocol) == IPPROTO_GRE;
 
-	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
 	if (IS_ERR(smfs_matcher)) {
 		err = PTR_ERR(smfs_matcher);
 		goto err_matcher;
 	}
 
 	rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
-				     MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+				     spec->flow_context.flow_source);
 	if (!rule) {
 		err = -EINVAL;
 		goto err_create;
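A side note on the arithmetic in the matcher_get hunk: the array grows from four to six entries because each matcher is now keyed by (ipv4, tcp, gre) instead of (ipv4, tcp), and with the validator admitting only UDP, TCP and GRE, exactly six combinations are reachable (tcp and gre are never both set). A standalone sanity check of the new index expression (hypothetical test program, not part of the driver):

	#include <assert.h>
	#include <stdbool.h>

	/* Mirrors the expression used in mlx5_ct_fs_smfs_matcher_get(). */
	static int matcher_idx(bool ipv4, bool tcp, bool gre)
	{
		return ipv4 * 3 + tcp * 2 + gre;
	}

	int main(void)
	{
		assert(matcher_idx(false, false, false) == 0); /* IPv6/UDP */
		assert(matcher_idx(false, false, true)  == 1); /* IPv6/GRE */
		assert(matcher_idx(false, true,  false) == 2); /* IPv6/TCP */
		assert(matcher_idx(true,  false, false) == 3); /* IPv4/UDP */
		assert(matcher_idx(true,  false, true)  == 4); /* IPv4/GRE */
		assert(matcher_idx(true,  true,  false) == 5); /* IPv4/TCP */
		return 0;
	}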
@@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
 	bool busy = false;
 	int work_done = 0;
 
+	rcu_read_lock();
+
 	ch_stats->poll++;
 
 	work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
 	busy |= work_done == budget;
 	busy |= rq->post_wqes(rq);
 
-	if (busy)
-		return budget;
+	if (busy) {
+		work_done = budget;
+		goto out;
+	}
 
 	if (unlikely(!napi_complete_done(napi, work_done)))
-		return work_done;
+		goto out;
 
 	mlx5e_cq_arm(&rq->cq);
+
+out:
+	rcu_read_unlock();
 	return work_done;
 }
 
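The shape of this fix is worth noting: instead of sprinkling an unlock before each early return, the two returns become goto out, so the rcu_read_lock()/rcu_read_unlock() pair brackets every path through the poll. A minimal standalone sketch of the single-exit idiom (plain C with a pthread mutex standing in for the RCU read lock; names are hypothetical):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Every path, early-exit or not, passes through "out" and unlocks. */
	int poll_once(int budget, int work_done, bool busy)
	{
		pthread_mutex_lock(&lock);

		if (busy) {
			work_done = budget;
			goto out;
		}

		/* ... re-arm the completion queue here ... */

	out:
		pthread_mutex_unlock(&lock);
		return work_done;
	}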
@@ -3864,6 +3864,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
 	if (netdev->features & NETIF_F_NTUPLE)
 		netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
 
+	features &= ~NETIF_F_GRO_HW;
+	if (netdev->features & NETIF_F_GRO_HW)
+		netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+
 	return features;
 }
 
@@ -3896,6 +3900,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 		}
 	}
 
+	if (params->xdp_prog) {
+		if (features & NETIF_F_LRO) {
+			netdev_warn(netdev, "LRO is incompatible with XDP\n");
+			features &= ~NETIF_F_LRO;
+		}
+		if (features & NETIF_F_GRO_HW) {
+			netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
+			features &= ~NETIF_F_GRO_HW;
+		}
+	}
+
+	if (priv->xsk.refcnt) {
+		if (features & NETIF_F_GRO_HW) {
+			netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
+				    priv->xsk.refcnt);
+			features &= ~NETIF_F_GRO_HW;
+		}
+	}
+
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
 		features &= ~NETIF_F_RXHASH;
 		if (netdev->features & NETIF_F_RXHASH)
@@ -4850,10 +4873,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
 
-	if (!!MLX5_CAP_GEN(mdev, shampo) &&
-	    mlx5e_check_fragmented_striding_rq_cap(mdev))
-		netdev->hw_features |= NETIF_F_GRO_HW;
-
 	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
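All three hunks lean on the same ndo_fix_features contract: the hook may only clear bits from the feature set the user requested, never add them, and each cleared bit gets a netdev_warn() explaining why. A minimal sketch of that shape (illustrative only; the real checks live in mlx5e_fix_features() and mlx5e_fix_uplink_rep_features() above):

	static netdev_features_t example_fix_features(struct net_device *netdev,
						      netdev_features_t features)
	{
		/* Clear, never add, bits; warn so the user sees why. */
		if (features & NETIF_F_GRO_HW) {
			netdev_warn(netdev, "HW GRO not supported in this configuration\n");
			features &= ~NETIF_F_GRO_HW;
		}
		return features;
	}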
@@ -2663,28 +2663,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
 	clean_tree(&root_ns->ns.node);
 }
 
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_flow_steering *steering = dev->priv.steering;
-
-	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->fdb_root_ns);
-	steering->fdb_root_ns = NULL;
-	kfree(steering->fdb_sub_ns);
-	steering->fdb_sub_ns = NULL;
-	cleanup_root_ns(steering->port_sel_root_ns);
-	cleanup_root_ns(steering->sniffer_rx_root_ns);
-	cleanup_root_ns(steering->sniffer_tx_root_ns);
-	cleanup_root_ns(steering->rdma_rx_root_ns);
-	cleanup_root_ns(steering->rdma_tx_root_ns);
-	cleanup_root_ns(steering->egress_root_ns);
-	mlx5_cleanup_fc_stats(dev);
-	kmem_cache_destroy(steering->ftes_cache);
-	kmem_cache_destroy(steering->fgs_cache);
-	mlx5_ft_pool_destroy(dev);
-	kfree(steering);
-}
-
 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
 {
 	struct fs_prio *prio;
@@ -3086,43 +3064,28 @@ cleanup:
 	return err;
 }
 
-int mlx5_init_fs(struct mlx5_core_dev *dev)
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
 {
-	struct mlx5_flow_steering *steering;
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	cleanup_root_ns(steering->root_ns);
+	cleanup_root_ns(steering->fdb_root_ns);
+	steering->fdb_root_ns = NULL;
+	kfree(steering->fdb_sub_ns);
+	steering->fdb_sub_ns = NULL;
+	cleanup_root_ns(steering->port_sel_root_ns);
+	cleanup_root_ns(steering->sniffer_rx_root_ns);
+	cleanup_root_ns(steering->sniffer_tx_root_ns);
+	cleanup_root_ns(steering->rdma_rx_root_ns);
+	cleanup_root_ns(steering->rdma_tx_root_ns);
+	cleanup_root_ns(steering->egress_root_ns);
+}
+
+int mlx5_fs_core_init(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
 	int err = 0;
 
-	err = mlx5_init_fc_stats(dev);
-	if (err)
-		return err;
-
-	err = mlx5_ft_pool_init(dev);
-	if (err)
-		return err;
-
-	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
-	if (!steering) {
-		err = -ENOMEM;
-		goto err;
-	}
-
-	steering->dev = dev;
-	dev->priv.steering = steering;
-
-	if (mlx5_fs_dr_is_supported(dev))
-		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
-	else
-		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
-
-	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
-						sizeof(struct mlx5_flow_group), 0,
-						0, NULL);
-	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
-						 0, NULL);
-	if (!steering->ftes_cache || !steering->fgs_cache) {
-		err = -ENOMEM;
-		goto err;
-	}
-
 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -3180,8 +3143,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 	}
 
 	return 0;
 
 err:
-	mlx5_cleanup_fs(dev);
+	mlx5_fs_core_cleanup(dev);
+	return err;
+}
+
+void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	kmem_cache_destroy(steering->ftes_cache);
+	kmem_cache_destroy(steering->fgs_cache);
+	kfree(steering);
+	mlx5_ft_pool_destroy(dev);
+	mlx5_cleanup_fc_stats(dev);
+}
+
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering;
+	int err = 0;
+
+	err = mlx5_init_fc_stats(dev);
+	if (err)
+		return err;
+
+	err = mlx5_ft_pool_init(dev);
+	if (err)
+		goto err;
+
+	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+	if (!steering) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	steering->dev = dev;
+	dev->priv.steering = steering;
+
+	if (mlx5_fs_dr_is_supported(dev))
+		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+	else
+		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+						sizeof(struct mlx5_flow_group), 0,
+						0, NULL);
+	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+						 0, NULL);
+	if (!steering->ftes_cache || !steering->fgs_cache) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	mlx5_fs_core_free(dev);
 	return err;
 }
@@ -298,8 +298,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
 				 enum mlx5_flow_steering_mode mode);
 
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
+void mlx5_fs_core_free(struct mlx5_core_dev *dev);
+int mlx5_fs_core_init(struct mlx5_core_dev *dev);
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
 
 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
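Together with the fs_core.c hunks above, the header now exposes a symmetric four-stage lifecycle in place of the old init/cleanup pair. A sketch of the intended pairing, inferred from the main.c call sites further below:

	/*
	 *   probe:  mlx5_init_once()    -> mlx5_fs_core_alloc()   caches, fc stats, ft pool
	 *   load:   mlx5_load()         -> mlx5_fs_core_init()    root namespaces (needs FW)
	 *   unload: mlx5_unload()       -> mlx5_fs_core_cleanup() root namespaces
	 *   remove: mlx5_cleanup_once() -> mlx5_fs_core_free()    caches, fc stats, ft pool
	 */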
@@ -8,7 +8,8 @@
 enum {
 	MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
 	MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
-	MLX5_FW_RESET_FLAGS_PENDING_COMP
+	MLX5_FW_RESET_FLAGS_PENDING_COMP,
+	MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
 };
 
 struct mlx5_fw_reset {
@@ -208,7 +209,10 @@ static void poll_sync_reset(struct timer_list *t)
 
 	if (fatal_error) {
 		mlx5_core_warn(dev, "Got Device Reset\n");
-		queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+		if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+			queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+		else
+			mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
 		return;
 	}
 
@@ -433,9 +437,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
 	struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
 	struct mlx5_eqe *eqe = data;
 
+	if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+		return NOTIFY_DONE;
+
 	switch (eqe->sub_type) {
 	case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
-			queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+		queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
 		break;
 	case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
 		mlx5_sync_reset_events_handle(fw_reset, eqe);
@@ -479,6 +486,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
 	mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
 }
 
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+{
+	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+	set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
+	cancel_work_sync(&fw_reset->fw_live_patch_work);
+	cancel_work_sync(&fw_reset->reset_request_work);
+	cancel_work_sync(&fw_reset->reset_reload_work);
+	cancel_work_sync(&fw_reset->reset_now_work);
+	cancel_work_sync(&fw_reset->reset_abort_work);
+}
+
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
@@ -16,6 +16,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
 int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
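The ordering inside mlx5_drain_fw_reset() is the load-bearing detail: the DROP_NEW_REQUESTS bit is published before any cancel_work_sync(), so the event notifier and the poll timer stop queueing new work and the flush cannot race with a requeue. A generic sketch of the same flag-then-flush idiom (illustrative; names hypothetical):

	static void drain_pattern(unsigned long *flags, struct work_struct *work)
	{
		set_bit(0, flags);      /* 1. refuse new requests from now on */
		cancel_work_sync(work); /* 2. flush anything already queued */
	}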
@@ -938,6 +938,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 		goto err_sf_table_cleanup;
 	}
 
+	err = mlx5_fs_core_alloc(dev);
+	if (err) {
+		mlx5_core_err(dev, "Failed to alloc flow steering\n");
+		goto err_fs;
+	}
+
 	dev->dm = mlx5_dm_create(dev);
 	if (IS_ERR(dev->dm))
 		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -948,6 +954,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 
 	return 0;
 
+err_fs:
+	mlx5_sf_table_cleanup(dev);
 err_sf_table_cleanup:
 	mlx5_sf_hw_table_cleanup(dev);
 err_sf_hw_table_cleanup:
@@ -985,6 +993,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_hv_vhca_destroy(dev->hv_vhca);
 	mlx5_fw_tracer_destroy(dev->tracer);
 	mlx5_dm_cleanup(dev);
+	mlx5_fs_core_free(dev);
 	mlx5_sf_table_cleanup(dev);
 	mlx5_sf_hw_table_cleanup(dev);
 	mlx5_vhca_event_cleanup(dev);
@@ -1191,7 +1200,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 		goto err_tls_start;
 	}
 
-	err = mlx5_init_fs(dev);
+	err = mlx5_fs_core_init(dev);
 	if (err) {
 		mlx5_core_err(dev, "Failed to init flow steering\n");
 		goto err_fs;
@@ -1236,7 +1245,7 @@ err_ec:
 err_vhca:
 	mlx5_vhca_event_stop(dev);
 err_set_hca:
-	mlx5_cleanup_fs(dev);
+	mlx5_fs_core_cleanup(dev);
 err_fs:
 	mlx5_accel_tls_cleanup(dev);
 err_tls_start:
@@ -1265,7 +1274,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 	mlx5_ec_cleanup(dev);
 	mlx5_sf_hw_table_destroy(dev);
 	mlx5_vhca_event_stop(dev);
-	mlx5_cleanup_fs(dev);
+	mlx5_fs_core_cleanup(dev);
 	mlx5_accel_ipsec_cleanup(dev);
 	mlx5_accel_tls_cleanup(dev);
 	mlx5_fpga_device_stop(dev);
@@ -1618,6 +1627,10 @@ static void remove_one(struct pci_dev *pdev)
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct devlink *devlink = priv_to_devlink(dev);
 
+	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+	 * fw_reset before unregistering the devlink.
+	 */
+	mlx5_drain_fw_reset(dev);
 	devlink_unregister(devlink);
 	mlx5_sriov_disable(pdev);
 	mlx5_crdump_disable(dev);
@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
 	return 0;
 }
 
+static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
+					struct mlx5dr_ste_actions_attr *attr,
+					bool rx_rule,
+					bool *recalc_cs_required)
+{
+	*recalc_cs_required = false;
+
+	/* if device supports csum recalculation - no adjustment needed */
+	if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
+		return;
+
+	/* no adjustment needed on TX rules */
+	if (!rx_rule)
+		return;
+
+	if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
+		/* Ignore the modify TTL action.
+		 * It is always kept as last HW action.
+		 */
+		attr->modify_actions--;
+		return;
+	}
+
+	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+		/* Due to a HW bug on some devices, modifying TTL on RX flows
+		 * will cause an incorrect checksum calculation. In such cases
+		 * we will use a FW table to recalculate the checksum.
+		 */
+		*recalc_cs_required = true;
+}
+
 static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
 				     struct mlx5dr_action *actions[],
 				     int last_idx)
@@ -650,8 +681,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 		case DR_ACTION_TYP_MODIFY_HDR:
 			attr.modify_index = action->rewrite->index;
 			attr.modify_actions = action->rewrite->num_of_actions;
-			recalc_cs_required = action->rewrite->modify_ttl &&
-					     !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
+			if (action->rewrite->modify_ttl)
+				dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
+							    &recalc_cs_required);
 			break;
 		case DR_ACTION_TYP_L2_TO_TNL_L2:
 		case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -732,12 +764,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 	*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
 	last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
 
-	/* Due to a HW bug in some devices, modifying TTL on RX flows will
-	 * cause an incorrect checksum calculation. In this case we will
-	 * use a FW table to recalculate.
-	 */
-	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
-	    rx_rule && recalc_cs_required && dest_action) {
+	if (recalc_cs_required && dest_action) {
 		ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
 		if (ret) {
 			mlx5dr_err(dmn,
@@ -842,7 +869,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 				   struct mlx5dr_action_dest *dests,
 				   u32 num_of_dests,
-				   bool ignore_flow_level)
+				   bool ignore_flow_level,
+				   u32 flow_source)
 {
 	struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
 	struct mlx5dr_action **ref_actions;
@@ -914,7 +942,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 					    reformat_req,
 					    &action->dest_tbl->fw_tbl.id,
 					    &action->dest_tbl->fw_tbl.group_id,
-					    ignore_flow_level);
+					    ignore_flow_level,
+					    flow_source);
 	if (ret)
 		goto free_action;
 
@@ -1556,12 +1585,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
 	return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
 }
 
-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
-{
-	return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
-	       !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
-}
-
 static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 					    u32 max_hw_actions,
 					    u32 num_sw_actions,
@@ -1573,6 +1596,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 	const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
 	const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
 	struct mlx5dr_domain *dmn = action->rewrite->dmn;
+	__be64 *modify_ttl_sw_action = NULL;
 	int ret, i, hw_idx = 0;
 	__be64 *sw_action;
 	__be64 hw_action;
@@ -1585,8 +1609,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 	action->rewrite->allow_rx = 1;
 	action->rewrite->allow_tx = 1;
 
-	for (i = 0; i < num_sw_actions; i++) {
-		sw_action = &sw_actions[i];
+	for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
+		/* modify TTL is handled separately, as a last action */
+		if (i == num_sw_actions) {
+			sw_action = modify_ttl_sw_action;
+			modify_ttl_sw_action = NULL;
+		} else {
+			sw_action = &sw_actions[i];
+		}
 
 		ret = dr_action_modify_check_field_limitation(action,
 							      sw_action);
@@ -1595,10 +1625,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 		if (!(*modify_ttl) &&
 		    dr_action_modify_check_is_ttl_modify(sw_action)) {
-			if (dr_action_modify_ttl_ignore(dmn))
-				continue;
-
+			modify_ttl_sw_action = sw_action;
 			*modify_ttl = true;
 			continue;
 		}
 
 		/* Convert SW action to HW action */
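The loop rewrite in dr_actions_convert_modify_header() is the subtle piece: a matched modify-TTL action is pulled out of the scan and emitted one extra iteration after the array is exhausted, guaranteeing it lands as the last HW action, which is exactly what dr_action_modify_ttl_adjust() assumes when it drops it by decrementing attr->modify_actions. A standalone illustration of the defer-to-last idiom (hypothetical test program, not driver code):

	#include <stdio.h>

	int main(void)
	{
		int actions[] = { 1, 2, 3, 4 };
		int num = 4;
		int *pending = NULL; /* the "modify TTL" entry, deferred to the end */
		int i;

		for (i = 0; i < num || pending; i++) {
			int *cur;

			if (i == num) {               /* array done: emit deferred entry */
				cur = pending;
				pending = NULL;
			} else if (actions[i] == 3) { /* pretend 3 is "modify TTL" */
				pending = &actions[i];
				continue;
			} else {
				cur = &actions[i];
			}
			printf("%d ", *cur);          /* prints: 1 2 4 3 */
		}
		printf("\n");
		return 0;
	}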
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 			    bool reformat_req,
 			    u32 *tbl_id,
 			    u32 *group_id,
-			    bool ignore_flow_level)
+			    bool ignore_flow_level,
+			    u32 flow_source)
 {
 	struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
 	struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 	fte_info.val = val;
 	fte_info.dest_arr = dest;
 	fte_info.ignore_flow_level = ignore_flow_level;
+	fte_info.flow_context.flow_source = flow_source;
 
 	ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
 	if (ret) {
@@ -420,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
 	 * encapsulation. The reason for that is that we support
 	 * modify headers for outer headers only
 	 */
-	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
 		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
 		dr_ste_v0_set_rewrite_actions(last_ste,
 					      attr->modify_actions,
@@ -513,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
 		}
 	}
 
-	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
 		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
 			dr_ste_v0_arr_init_next(&last_ste,
 						added_stes,
@@ -1461,7 +1461,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 			    bool reformat_req,
 			    u32 *tbl_id,
 			    u32 *group_id,
-			    bool ignore_flow_level);
+			    bool ignore_flow_level,
+			    u32 flow_source);
 void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
 			      u32 group_id);
 #endif /* _DR_TYPES_H_ */
@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 	} else if (num_term_actions > 1) {
 		bool ignore_flow_level =
 			!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+		u32 flow_source = fte->flow_context.flow_source;
 
 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
 		    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 		tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
 								term_actions,
 								num_term_actions,
-								ignore_flow_level);
+								ignore_flow_level,
+								flow_source);
 		if (!tmp_action) {
 			err = -EOPNOTSUPP;
 			goto free_actions;
@@ -99,7 +99,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 				   struct mlx5dr_action_dest *dests,
 				   u32 num_of_dests,
-				   bool ignore_flow_level);
+				   bool ignore_flow_level,
+				   u32 flow_source);
 
 struct mlx5dr_action *mlx5dr_action_create_drop(void);
 
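The last several hunks are one logical fix: the flow source recorded in the FTE's flow context now travels all the way down to the firmware-created multi-destination table instead of being hardcoded. The call chain, summarized from the hunks above:

	/*
	 *   mlx5_cmd_dr_create_fte()            reads fte->flow_context.flow_source
	 *     -> mlx5dr_action_create_mult_dest_tbl(..., flow_source)
	 *          -> mlx5dr_fw_create_md_tbl(..., flow_source)
	 *               -> fte_info.flow_context.flow_source = flow_source
	 */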